idx (int64, 0-7.85k) | idx_lca (int64, 0-223) | offset (int64, 162-55k) | repo (string, 62 classes) | commit_hash (string, 113 classes) | target_file (string, 134 classes) | line_type_lca (string, 7 classes) | ground_truth (string, 1-46 chars) | in_completions (bool, 1 class) | completion_type (string, 6 classes) | non_dunder_count_intellij (int64, 0-529) | non_dunder_count_jedi (int64, 0-128) | start_with_ (bool, 2 classes) | first_occurrence (bool, 2 classes) | intellij_completions (list, 1-532 items) | jedi_completions (list, 3-148 items) | prefix (string, 162-55k chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
446 | 17 | 6,674 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | false |
[
"row_dim_name",
"uri",
"indent",
"attr_name",
"col_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.
|
447 | 17 | 6,764 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | uri | true | statement | 17 | 17 | false | false |
[
"attr_name",
"uri",
"row_dim_name",
"col_dim_name",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.
|
448 | 17 | 7,001 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | find_csr_chunk_size | true | function | 6 | 12 | false | true |
[
"get_start_stamp",
"format_elapsed",
"get_sort_and_permutation",
"find_csr_chunk_size",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.
|
449 | 17 | 7,050 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | soma_options | true | statement | 17 | 17 | false | false |
[
"to_csr_matrix",
"from_matrix",
"row_dim_name",
"ingest_data",
"col_dim_name",
"__init__",
"attr_name",
"create_empty_array",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.
|
450 | 17 | 7,063 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | goal_chunk_nnz | true | statement | 8 | 8 | false | true |
[
"goal_chunk_nnz",
"write_X_chunked_if_csr",
"X_capacity",
"X_cell_order",
"X_tile_order",
"obs_extent",
"string_dim_zstd_level",
"var_extent",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.
|
451 | 17 | 7,258 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | get_start_stamp | true | function | 6 | 12 | false | false |
[
"get_start_stamp",
"find_csr_chunk_size",
"format_elapsed",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.
|
452 | 17 | 7,785 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false |
[
"verbose",
"row_dim_name",
"attr_name",
"col_dim_name",
"ingest_data",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information of using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permuation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.
|
453 | 17 | 7,920 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 19 | 17 | false | false |
[
"attr_name",
"uri",
"row_dim_name",
"col_dim_name",
"ingest_data",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"for",
"yield",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
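# [Editorial sketch -- not part of the original source file.] Using the 4x3
# example in the comment block above (rows named C, A, B, D), this call would
# presumably return sorted_row_names = ['A', 'B', 'C', 'D'] and
# permutation = [1, 2, 0, 3], so matrix[permutation[0]] fetches the CSR row
# labeled 'A' without physically reordering the matrix.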
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
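# [Editorial sketch with made-up numbers -- the exact semantics of
# find_csr_chunk_size are an assumption here.] If goal_chunk_nnz were 5 and the
# next permuted rows had nnz counts 2, 2, 3, the helper might stop after the
# first two rows (running nnz 4), giving chunk_size = 2 and i2 = i + 2.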
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
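# [Editorial sketch restating the worked example above -- not in the original
# file.] chunk_coo.row is chunk-local (it restarts at 0 for every chunk), so
# adding i shifts it back into the global sorted ordering. For the second chunk
# of the example (i=2, original rows 0 and 3), chunk_coo.row + i is [2, 3],
# which indexes sorted_row_names to give d0 labels ['C', 'D'].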
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.
|
454 | 17 | 8,102 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false
[
"verbose",
"row_dim_name",
"attr_name",
"col_dim_name",
"ingest_data",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.
|
455 | 17 | 8,142 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | format_elapsed | true | function | 6 | 12 | false | false
[
"format_elapsed",
"get_start_stamp",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.
|
456 | 17 | 8,168 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | false
[
"row_dim_name",
"uri",
"indent",
"attr_name",
"col_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.
|
457 | 17 | 8,231 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false
[
"verbose",
"row_dim_name",
"attr_name",
"col_dim_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.
|
458 | 17 | 8,263 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | format_elapsed | true | function | 6 | 12 | false | false
[
"format_elapsed",
"get_start_stamp",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#    Original        Sorted       Permutation
#      data         row names
#
#      X Y Z
#    C 0 1 2         A            1
#    A 4 0 5         B            2
#    B 7 0 0         C            0
#    D 0 8 9         D            3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.
|
459 | 17 | 8,288 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | false
[
"row_dim_name",
"uri",
"indent",
"attr_name",
"col_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.
|
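The chunked ingestor whose prefix just ended leans on util.get_sort_and_permutation, whose body is not included in these prefixes. Below is a minimal stand-in, assuming the helper simply returns the labels in sorted order together with the argsort permutation; the name sort_and_permutation is illustrative only, and the 4x3 example mirrors the worked comment inside the code.

# Hedged sketch: assumed behaviour of the sort-and-permutation helper, plus the
# cursor-indirection pattern used by ingest_data_rows_chunked.
import numpy as np
import scipy.sparse

def sort_and_permutation(labels):
    # Returns (sorted_labels, permutation) with sorted_labels[k] == labels[permutation[k]].
    labels = np.asarray(labels)
    permutation = np.argsort(labels, kind="stable")
    return labels[permutation], permutation

# The 4x3 matrix from the in-code example: rows labelled C, A, B, D.
row_names = np.asarray(["C", "A", "B", "D"])
matrix = scipy.sparse.csr_matrix(np.array([[0, 1, 2], [4, 0, 5], [7, 0, 0], [0, 8, 9]]))
sorted_row_names, permutation = sort_and_permutation(row_names)
# permutation == [1, 2, 0, 3]; the first chunk of two rows is read through it.
chunk_coo = matrix[permutation[0:2]].tocoo()
print(sorted_row_names[chunk_coo.row + 0])  # ['A' 'A' 'B']
print(chunk_coo.data)                       # [4 5 7]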
460 | 17 | 8,994 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
verbose
| true |
statement
| 17 | 17 | false | false |
[
"verbose",
"col_dim_name",
"row_dim_name",
"attr_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.
|
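The prefix above cuts off just as to_csr_matrix begins. For orientation, here is a hedged, standalone sketch of that read-back direction -- query the sparse array, then map the string coordinates back to integer positions. The function name read_as_csr, the default dimension names, and the bytes-vs-str handling are assumptions for illustration, not the library's actual implementation.

# Assumed read-back pattern for a 2-D string-dimensioned sparse array.
import numpy as np
import scipy.sparse
import tiledb

def _as_str(x):
    return x.decode("utf-8") if isinstance(x, bytes) else x

def read_as_csr(uri, row_labels, col_labels, row_dim="obs_id", col_dim="var_id", attr="value"):
    row_index = {label: i for i, label in enumerate(row_labels)}
    col_index = {label: j for j, label in enumerate(col_labels)}
    with tiledb.open(uri, mode="r") as A:
        result = A[:]  # dict of coordinate arrays plus the attribute values
    rows = np.array([row_index[_as_str(r)] for r in result[row_dim]])
    cols = np.array([col_index[_as_str(c)] for c in result[col_dim]])
    data = result[attr]
    return scipy.sparse.coo_matrix((data, (rows, cols)), shape=(len(row_labels), len(col_labels))).tocsr()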
461 | 17 | 9,024 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
get_start_stamp
| true |
function
| 6 | 12 | false | false |
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.
|
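Every verbose branch in these prefixes calls util.get_start_stamp and util.format_elapsed, neither of which is shown. A plausible stand-in with the same calling convention is sketched below; the wall-clock implementation is an assumption, only the call pattern is taken from the code above.

import time

def get_start_stamp():
    # Opaque start token later handed back to format_elapsed.
    return time.time()

def format_elapsed(start, message):
    # Appends the elapsed wall-clock time to a log message.
    return f"{message} TIME {time.time() - start:.3f} seconds"

# Usage mirroring from_matrix / ingest_data_rows_chunked:
s = get_start_stamp()
# ... write chunks ...
print(format_elapsed(s, "FINISH WRITING example_uri"))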
462 | 17 | 9,068 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
indent
| true |
statement
| 17 | 17 | false | false |
[
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
|
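util.find_csr_chunk_size drives the chunking loop but is not defined in any of these prefixes. The sketch below is one plausible reading -- accumulate the nonzero counts of permuted rows until the goal is reached, always taking at least one row -- and should be treated as an assumption, not the actual helper.

import numpy as np
import scipy.sparse

def find_csr_chunk_size(matrix, permutation, start_index, goal_chunk_nnz):
    chunk_size = 0
    nnz = 0
    for k in range(start_index, matrix.shape[0]):
        row = permutation[k]
        row_nnz = matrix.indptr[row + 1] - matrix.indptr[row]
        if chunk_size > 0 and nnz + row_nnz > goal_chunk_nnz:
            break
        chunk_size += 1
        nnz += row_nnz
    return chunk_size

# On the 4x3 example matrix with a goal of 4 nonzeros per chunk:
m = scipy.sparse.csr_matrix(np.array([[0, 1, 2], [4, 0, 5], [7, 0, 0], [0, 8, 9]]))
print(find_csr_chunk_size(m, np.array([1, 2, 0, 3]), 0, 4))  # 2 (rows A and B, nnz 2 + 1)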
463 | 17 | 9,093 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
uri
| true |
statement
| 17 | 17 | false | false |
[
"indent",
"uri",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.
|
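Stripped of the class plumbing, the storage pattern in create_empty_array and ingest_data_whole above boils down to a 2-D sparse TileDB array with two string dimensions and one value attribute. A minimal standalone version follows; the URI and example labels are placeholders, and the compression filters and capacity tuning from the real schema are omitted for brevity.

import numpy as np
import scipy.sparse
import tiledb

uri = "example_assay_matrix"  # placeholder path

dom = tiledb.Domain(
    tiledb.Dim(name="obs_id", domain=(None, None), dtype="ascii"),
    tiledb.Dim(name="var_id", domain=(None, None), dtype="ascii"),
)
schema = tiledb.ArraySchema(
    domain=dom,
    attrs=[tiledb.Attr(name="value", dtype=np.float64)],
    sparse=True,
    allows_duplicates=True,
)
tiledb.Array.create(uri, schema)

# Whole-matrix write, as in ingest_data_whole: COO coordinates become string keys.
matrix = scipy.sparse.coo_matrix(np.array([[0.0, 1.0], [2.0, 0.0]]))
obs_names = np.asarray(["cell_0", "cell_1"])
var_names = np.asarray(["gene_0", "gene_1"])
with tiledb.open(uri, mode="w") as A:
    A[obs_names[matrix.row], var_names[matrix.col]] = matrix.data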
464 | 17 | 9,422 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
uri
| true |
statement
| 17 | 17 | false | false |
[
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# with csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.
|
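One small detail worth flagging: the ingest_data dispatch above checks isinstance(matrix, scipy.sparse._csr.csr_matrix), which reaches into a private scipy module path. An equivalent check through the public class is sketched below; whether the library later switched to it is not shown here, so treat this as a suggestion rather than the project's code.

import scipy.sparse

def wants_chunked_ingest(matrix, write_X_chunked_if_csr=True):
    # Same intent as the dispatch in ingest_data, via the public csr_matrix class.
    return isinstance(matrix, scipy.sparse.csr_matrix) and write_X_chunked_if_csr

print(wants_chunked_ingest(scipy.sparse.csr_matrix((3, 3))))  # True
print(wants_chunked_ingest(scipy.sparse.csc_matrix((3, 3))))  # False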
465 | 17 | 10,559 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
row_dim_name
| true |
statement
| 17 | 17 | false | false |
[
"row_dim_name",
"col_dim_name",
"attr_name",
"ingest_data_rows_chunked",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.
|
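For context on the label-to-index step where the prefix above breaks off, here is a minimal standalone sketch (illustrative only, not part of the dataset record; the toy labels reuse the A/B/C/D and S/T/U/V example from the code comments) of turning string-indexed (obs_id, var_id, value) triples into a scipy CSR matrix:

import scipy.sparse

# Labels as TileDB would return them, plus the query triples from the comment example.
row_labels = ["A", "B", "C", "D"]
col_labels = ["S", "T", "U", "V"]
obs_ids = ["A", "A", "B", "B", "C", "C", "D", "D"]
var_ids = ["S", "V", "S", "U", "V", "T", "S", "T"]
values  = [4, 3, 5, 6, 2, 1, 8, 7]

# Map string labels to integer CSR indices, then build the matrix.
row_to_idx = {label: i for i, label in enumerate(row_labels)}
col_to_idx = {label: i for i, label in enumerate(col_labels)}
rows = [row_to_idx[o] for o in obs_ids]
cols = [col_to_idx[v] for v in var_ids]
csr = scipy.sparse.csr_matrix(
    (values, (rows, cols)), shape=(len(row_labels), len(col_labels))
)
print(csr.toarray())
# [[4 0 0 3]
#  [5 0 6 0]
#  [0 1 0 2]
#  [8 7 0 0]]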
466 | 17 | 10,655 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
col_dim_name
| true |
statement
| 17 | 17 | false | false |
[
"col_dim_name",
"row_dim_name",
"attr_name",
"uri",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.
|
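The helper util.get_sort_and_permutation is called but not shown in the prefix above. A plausible minimal equivalent, matching the C/A/B/D example in the chunking comments, could look like this (assumed behavior, not the project's actual implementation):

# Hypothetical stand-in for util.get_sort_and_permutation: returns the labels in
# sorted order together with the original index of each sorted label.
def get_sort_and_permutation(labels):
    order = sorted(range(len(labels)), key=lambda i: labels[i])
    return [labels[i] for i in order], order

sorted_names, permutation = get_sort_and_permutation(["C", "A", "B", "D"])
print(sorted_names)   # ['A', 'B', 'C', 'D']
print(permutation)    # [1, 2, 0, 3]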
467 | 17 | 10,739 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
attr_name
| true |
statement
| 19 | 17 | false | false |
[
"col_dim_name",
"row_dim_name",
"uri",
"attr_name",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"for",
"yield",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.col_dim_name]]
retval = scipy.sparse.csr_matrix(
(list(df[self.
|
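util.find_csr_chunk_size is likewise only referenced in the prefixes. A simplified greedy stand-in, assuming it extends the chunk in permuted row order until the chunk's nnz reaches goal_chunk_nnz, might look like this (illustrative sketch only):

import numpy as np
import scipy.sparse

# Hypothetical stand-in: count rows (in permuted order, starting at `start`)
# until the accumulated nonzero count reaches the goal.
def find_csr_chunk_size(csr, permutation, start, goal_chunk_nnz):
    chunk_size = 0
    nnz = 0
    for i in range(start, csr.shape[0]):
        nnz += csr[permutation[i]].nnz
        chunk_size += 1
        if nnz >= goal_chunk_nnz:
            break
    return chunk_size

matrix = scipy.sparse.csr_matrix(np.array([[0, 1, 2], [4, 0, 5], [7, 0, 0], [0, 8, 9]]))
permutation = [1, 2, 0, 3]  # sorted-name order from the comment example
print(find_csr_chunk_size(matrix, permutation, 0, goal_chunk_nnz=3))  # 2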
468 | 17 | 10,819 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
verbose
| true |
statement
| 17 | 17 | false | false |
[
"verbose",
"col_dim_name",
"row_dim_name",
"uri",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.col_dim_name]]
retval = scipy.sparse.csr_matrix(
(list(df[self.attr_name]), (list(obs_indices), list(var_indices)))
)
if self.
|
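The ingest_data_whole path swaps COO integer coordinates for string labels before writing; that indexing step can be seen without TileDB at all (toy data, illustrative only):

import numpy as np
import scipy.sparse

matrix = np.array([[0, 1, 2],
                   [4, 0, 5]])
row_names = np.asarray(["C", "A"])
col_names = np.asarray(["X", "Y", "Z"])

# COO gives integer (row, col) coordinates; numpy fancy indexing swaps them for labels.
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
for r, c, v in zip(d0, d1, mat_coo.data):
    print(r, c, v)
# C Y 1
# C Z 2
# A X 4
# A Z 5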
469 | 17 | 10,851 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
format_elapsed
| true |
function
| 6 | 12 | false | false |
[
"format_elapsed",
"get_start_stamp",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed as
# csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.col_dim_name]]
retval = scipy.sparse.csr_matrix(
(list(df[self.attr_name]), (list(obs_indices), list(var_indices)))
)
if self.verbose:
print(util.
|
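The timing helpers util.get_start_stamp and util.format_elapsed are not shown in any prefix. A hypothetical minimal pair, assuming a wall-clock start stamp and a message suffixed with elapsed seconds, could be:

import time

def get_start_stamp():
    # Assumed behavior: simply a wall-clock timestamp.
    return time.time()

def format_elapsed(start, message):
    # Assumed behavior: append elapsed seconds to the caller's message.
    return f"{message} TIME {time.time() - start:.3f} seconds"

s = get_start_stamp()
time.sleep(0.1)
print(format_elapsed(s, "FINISH WRITING /tmp/example-array"))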
470 | 17 | 10,877 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
indent
| true |
statement
| 17 | 17 | false | false |
[
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed
# as csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B: 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.col_dim_name]]
retval = scipy.sparse.csr_matrix(
(list(df[self.attr_name]), (list(obs_indices), list(var_indices)))
)
if self.verbose:
print(util.format_elapsed(s, f"{self.
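Aside: the to_csr_matrix logic in the prefix above maps string dimension values to integer indices before handing the triples to scipy. A minimal standalone sketch of that remapping, reusing the toy values from the comment block (plain numpy/scipy, illustrative only, no TileDB I/O):
# Rebuild a CSR matrix from string-keyed (row, col, value) triples, as the
# to_csr_matrix comments describe; the triples are hard-coded here purely
# for illustration.
import numpy as np
import scipy.sparse

row_labels = ["A", "B", "C", "D"]
col_labels = ["S", "T", "U", "V"]
obs_id = np.array(["A", "A", "B", "B", "C", "C", "D", "D"])
var_id = np.array(["S", "V", "S", "U", "V", "T", "S", "T"])
value = np.array([4, 3, 5, 6, 2, 1, 8, 7])

# Map each string label to its integer position, then build the matrix.
row_index = {label: i for i, label in enumerate(row_labels)}
col_index = {label: j for j, label in enumerate(col_labels)}
rows = [row_index[r] for r in obs_id]
cols = [col_index[c] for c in var_id]
csr = scipy.sparse.csr_matrix((value, (rows, cols)),
                              shape=(len(row_labels), len(col_labels)))
print(csr.toarray())  # rows A..D, columns S..V, matching the comment's example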
| 471 | 17 | 10,902 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | uri | true | statement | 17 | 17 | false | false |
[
"indent",
"uri",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
# Original Sorted Permutation
# data row names
#
# X Y Z
# C 0 1 2 A 1
# A 4 0 5 B 2
# B 7 0 0 C 0
# D 0 8 9 D 3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param uri: TileDB URI of the array to be written.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed
# as csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.verbose:
print(f"{self.indent}START __ingest_coo_data_string_dims_rows_chunked")
with tiledb.open(self.uri, mode="w") as A:
nrow = len(sorted_row_names)
i = 0
while i < nrow:
# Find a number of CSR rows which will result in a desired nnz for the chunk.
chunk_size = util.find_csr_chunk_size(matrix, permutation, i, self.soma_options.goal_chunk_nnz)
i2 = i + chunk_size
# Convert the chunk to a COO matrix.
chunk_coo = matrix[permutation[i:i2]].tocoo()
s2 = util.get_start_stamp()
# Write the chunk-COO to TileDB.
d0 = sorted_row_names[chunk_coo.row + i]
d1 = col_names[chunk_coo.col]
if len(d0) == 0:
continue
# Python ranges are (lo, hi) with lo inclusive and hi exclusive. But saying that
# makes us look buggy if we say we're ingesting chunk 0:18 and then 18:32.
# Instead, print doubly-inclusive lo..hi like 0..17 and 18..31.
if self.verbose:
print("%sSTART chunk rows %d..%d of %d, obs_ids %s..%s, nnz=%d, %7.3f%%" %
(self.indent, i, i2-1, nrow, d0[0], d0[-1], chunk_coo.nnz, 100*(i2-1)/nrow))
# Write a TileDB fragment
A[d0, d1] = chunk_coo.data
if self.verbose:
print(util.format_elapsed(s2,f"{self.indent}FINISH chunk"))
i = i2
if self.verbose:
print(util.format_elapsed(s,f"{self.indent}FINISH __ingest_coo_data_string_dims_rows_chunked"))
# ----------------------------------------------------------------
def to_csr_matrix(self, row_labels, col_labels):
"""
Reads the TileDB array storage and returns a sparse CSR matrix. The
row/column labels should be obs,var labels if the AssayMatrix is X, or obs,obs labels if
the AssayMatrix is obsp, or var,var labels if the AssayMatrix is varp.
Note in all cases that TileDB will have sorted the row and column labels; they won't
be in the same order as they were in any anndata object which was used to create the
TileDB storage.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
# Since the TileDB array is sparse, with two string dimensions, we get back a dict:
# * 'obs_id' key is a sequence of dim0 coordinates for X data.
# * 'var_id' key is a sequence of dim1 coordinates for X data.
# * 'values' key is a sequence of X data values.
with tiledb.open(self.uri) as A:
df = A[:]
# Now we need to convert from TileDB's string indices to CSR integer indices.
# Make a dict from string dimension values to integer indices.
#
# Example: suppose the sparse matrix looks like:
#
# S T U V
# A 4 . . 3
# B: 5 . 6 .
# C . 1 . 2
# D 8 7 . .
#
# The return value from the X[:] query is (obs_id,var_id,value) triples like
#
# A,S,4 A,V,3 B,S,5 B,U,6 C,V,2 C,T,1 D,S,8 D,T,7
#
# whereas scipy csr is going to want
#
# 0,0,4 0,3,3 1,0,5 1,2,6 2,3,2 2,1,1 3,0,8 3,1,7
#
# In order to accomplish this, we need to map ['A','B','C','D'] to [0,1,2,3] via {'A':0,
# 'B':1, 'C':2, 'D':3} and similarly for the other dimension.
row_labels_to_indices = dict(zip(row_labels, [i for i,e in enumerate(row_labels)]))
col_labels_to_indices = dict(zip(col_labels, [i for i,e in enumerate(col_labels)]))
# Apply the map.
obs_indices = [row_labels_to_indices[row_label] for row_label in df[self.row_dim_name]]
var_indices = [col_labels_to_indices[col_label] for col_label in df[self.col_dim_name]]
retval = scipy.sparse.csr_matrix(
(list(df[self.attr_name]), (list(obs_indices), list(var_indices)))
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH read {self.
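Aside: the chunked-ingest comments in this prefix lean on util.get_sort_and_permutation, whose body is not part of this dump. The sketch below reimplements the idea inline with numpy.argsort (an assumption about its behavior, not the project's code) just to show the indexing pattern:
# Illustrative sketch of the sorted-row-names / permutation idea from the
# chunked-ingest comment; only the indexing pattern matters here.
import numpy as np
import scipy.sparse

row_names = np.array(["C", "A", "B", "D"])
matrix = scipy.sparse.csr_matrix(np.array(
    [[0, 1, 2],
     [4, 0, 5],
     [7, 0, 0],
     [0, 8, 9]]))

# Sort labels and remember where each sorted label lived originally.
permutation = np.argsort(row_names)          # [1, 2, 0, 3], as in the comment
sorted_row_names = row_names[permutation]    # ['A', 'B', 'C', 'D']

chunk_size = 2
for i in range(0, len(sorted_row_names), chunk_size):
    i2 = i + chunk_size
    chunk_coo = matrix[permutation[i:i2]].tocoo()
    d0 = sorted_row_names[chunk_coo.row + i]  # string row coordinates for this chunk
    print(f"chunk rows {i}..{i2 - 1}:",
          list(zip(d0, chunk_coo.col, chunk_coo.data)))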
| 472 | 18 | 646 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | commited | __init__ | true | function | 8 | 8 | true | true |
[
"ctx",
"uri",
"indent",
"verbose",
"exists",
"name",
"object_type",
"soma_options",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().
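Aside: each row in this dump pairs a ground_truth token with the intellij and jedi completion lists. A small sketch of how the membership flag and the ground truth's rank could be recomputed from a row; the record literal is a hand-copied fragment of row 472, and in real use these values would come from the table itself:
# Recompute membership and rank of the ground truth in the completion lists.
record = {
    "ground_truth": "__init__",
    "intellij_completions": ["ctx", "uri", "indent", "verbose", "exists",
                             "name", "object_type", "soma_options", "__init__"],
    "jedi_completions": [{"name": "ctx", "type": "statement"},
                         {"name": "exists", "type": "function"},
                         {"name": "__init__", "type": "function"}],
}

gt = record["ground_truth"]
in_intellij = gt in record["intellij_completions"]
in_jedi = gt in [c["name"] for c in record["jedi_completions"]]
rank = record["intellij_completions"].index(gt) if in_intellij else None
print(in_intellij, in_jedi, rank)  # True True 8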
| 473 | 18 | 936 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | verbose | true | statement | 15 | 15 | false | true |
[
"verbose",
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.
| 474 | 18 | 966 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | inproject | get_start_stamp | true | function | 6 | 12 | false | true |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.
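Aside: util.get_start_stamp and util.format_elapsed appear throughout these prefixes, but their implementations are not included in the dump. A plausible minimal version, offered only as an assumption about what such timing helpers do, not as the project's code:
# Hypothetical stand-ins for the timing helpers used in the prefixes above.
import time

def get_start_stamp() -> float:
    """Return a wall-clock start time."""
    return time.time()

def format_elapsed(start: float, message: str) -> str:
    """Append the elapsed seconds since `start` to a log message."""
    return f"{message} TIME {time.time() - start:.3f} seconds"

s = get_start_stamp()
# ... some work ...
print(format_elapsed(s, "FINISH WRITING"))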
| 475 | 18 | 1,010 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | non_informative | indent | true | statement | 15 | 15 | false | true |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
| 476 | 18 | 1,055 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | non_informative | uri | true | statement | 15 | 15 | false | true |
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.
| 477 | 18 | 1,112 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"uri",
"ctx",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.
| 478 | 18 | 1,226 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | ctx | true | statement | 15 | 15 | false | true |
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.
| 479 | 18 | 1,257 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | random | verbose | true | statement | 15 | 15 | false | false |
[
"verbose",
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.
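Aside: from_pandas_dataframe in this prefix is a thin wrapper over tiledb.from_pandas with the same keyword arguments. A self-contained round-trip sketch; the URI and DataFrame contents are made up for illustration:
# Minimal round trip through tiledb.from_pandas, mirroring the call above.
import pandas as pd
import tiledb

uri = "/tmp/uns_df_example"  # illustrative local path
df = pd.DataFrame({"obs_id": ["A", "B", "C"], "score": [0.1, 0.2, 0.3]})

tiledb.from_pandas(uri=uri, dataframe=df, sparse=True, allows_duplicates=False)

with tiledb.open(uri) as A:
    print(A.df[:])  # read the stored rows back as a pandas DataFrame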
| 480 | 18 | 1,289 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | format_elapsed | true | function | 6 | 12 | false | true |
[
"format_elapsed",
"get_start_stamp",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.
| 481 | 18 | 1,315 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | indent | true | statement | 15 | 15 | false | false |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.
| 482 | 18 | 1,360 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.
| 483 | 18 | 1,846 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | inproject | _to_tiledb_supported_array_type | true | function | 6 | 12 | true | true |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util.
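Aside: the completion point here is util._to_tiledb_supported_array_type, whose body is not shown in this dump. The sketch below is only a guess at the kind of dtype normalization such a helper might perform, not the project's implementation:
# Purely illustrative guess at dtype normalization before TileDB storage.
import numpy as np

def to_supported_dtype(arr: np.ndarray) -> np.ndarray:
    if arr.dtype == np.object_:
        # Object arrays of Python strings become fixed-width unicode arrays.
        return arr.astype(str)
    if arr.dtype == np.float16:
        # Half precision is widened to a commonly supported float width.
        return arr.astype(np.float32)
    return arr

print(to_supported_dtype(np.array(["a", "bc"], dtype=object)).dtype)  # <U2
print(to_supported_dtype(np.array([1.5], dtype=np.float16)).dtype)    # float32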
| 484 | 18 | 1,900 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | from_numpy_ndarray | true | function | 15 | 15 | false | true |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.
|
485 | 18 | 2,036 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
from_numpy_ndarray
| true |
function
| 15 | 15 | false | false |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.
|
486 | 18 | 2,268 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
infile
|
from_numpy_ndarray
| true |
function
| 15 | 15 | false | false |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.
|
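The `maybe_from_numpyable_object` dispatch in these prefixes normalizes scalars, lists, and `numpy.str_` values into 1D ndarrays before writing. A self-contained sketch of that normalization, as an illustrative helper that is not part of tiledbsc:

import numpy as np

def wrap_as_1d_ndarray(obj):
    # Illustrative helper (not part of tiledbsc): normalize an `uns` value to a 1D ndarray.
    if isinstance(obj, np.ndarray):
        return obj
    if isinstance(obj, list):
        return np.asarray(obj)
    if isinstance(obj, np.str_):
        # numpy.str_ needs an explicit cast to str, as noted in the prefix code.
        return np.asarray([obj]).astype(str)
    if isinstance(obj, (int, float, str)):
        return np.asarray([obj])
    raise TypeError(f"unsupported uns value of type {type(obj)}")

print(wrap_as_1d_ndarray(3.14))        # array([3.14])
print(wrap_as_1d_ndarray(["a", "b"]))  # array(['a', 'b'], dtype='<U1')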
487 | 18 | 2,502 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
from_numpy_ndarray
| true |
function
| 15 | 15 | false | false |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.
|
488 | 18 | 2,650 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
inproject
|
_to_tiledb_supported_array_type
| true |
function
| 6 | 12 | true | false |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util.
|
489 | 18 | 2,704 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
from_numpy_ndarray
| true |
function
| 15 | 15 | false | false |
[
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.
|
490 | 18 | 3,173 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
verbose
| true |
statement
| 15 | 15 | false | false |
[
"verbose",
"uri",
"ctx",
"indent",
"exists",
"__init__",
"create_empty_array_for_csr",
"from_numpy_ndarray",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.
|
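`from_numpy_ndarray`, shown complete in the prefix above, converts unicode ('<U*') arrays to object dtype before handing them to `tiledb.from_numpy`. A minimal sketch of that step, assuming an illustration-only local path:

import numpy as np
import tiledb

arr = np.asarray(["alpha", "beta", "gamma"])   # dtype '<U5'

# As in from_numpy_ndarray above: store unicode data via object dtype, since
# astype('str') alone does not make tiledb.from_numpy succeed.
if str(arr.dtype).startswith("<U"):
    arr = np.array(arr, dtype="O")

tiledb.from_numpy("/tmp/uns_strings_demo", arr)   # illustration-only URI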
491 | 18 | 3,203 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
get_start_stamp
| true |
function
| 6 | 12 | false | false |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.
|
492 | 18 | 3,247 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
indent
| true |
statement
| 15 | 15 | false | false |
[
"uri",
"indent",
"ctx",
"verbose",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_numpy_ndarray",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
|
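The verbose branches in these prefixes bracket every write with `util.get_start_stamp` and `util.format_elapsed`. Their implementation is not shown in this section, so the following is only a hypothetical wall-clock equivalent, assuming they record a start time and format elapsed seconds:

import time

def get_start_stamp():
    # Hypothetical stand-in for tiledbsc.util.get_start_stamp: capture a start time.
    return time.time()

def format_elapsed(start, message):
    # Hypothetical stand-in for tiledbsc.util.format_elapsed: append elapsed seconds.
    return f"{message} ({time.time() - start:.3f}s)"

s = get_start_stamp()
time.sleep(0.1)
print(format_elapsed(s, "FINISH WRITING FROM NUMPY.NDARRAY"))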
493 | 18 | 3,294 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
uri
| true |
statement
| 15 | 15 | false | false |
[
"indent",
"uri",
"ctx",
"from_numpy_ndarray",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.
|
494 | 18 | 3,914 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
uri
| true |
statement
| 15 | 15 | false | false |
[
"uri",
"ctx",
"from_numpy_ndarray",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.
|
495 | 18 | 3,939 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
ctx
| true |
statement
| 15 | 15 | false | false |
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.
|
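Once `tiledb.from_numpy` has written the array, it can be read back with `tiledb.open`, which is one way to check the ingested `uns` value. A small round-trip sketch, again with an illustration-only path:

import numpy as np
import tiledb

uri = "/tmp/uns_roundtrip_demo"          # illustration-only path
tiledb.from_numpy(uri, np.arange(4))     # write, as from_numpy_ndarray does above

with tiledb.open(uri) as A:              # open for reading (default mode "r")
    print(A[:])                          # [0 1 2 3]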
496 | 18 | 3,961 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
verbose
| true |
statement
| 15 | 15 | false | false |
[
"verbose",
"uri",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_numpy_ndarray",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.
|
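Note: the prefix above breaks off inside UnsArray.from_numpy_ndarray, whose key step (shown in full in the source) is rewriting fixed-width Unicode ('<U...') arrays as object-dtype arrays before tiledb.from_numpy. A minimal numpy-only sketch of that conversion, with no TileDB write and an illustrative helper name, is:
import numpy as np
def to_tiledb_writable(arr: np.ndarray) -> np.ndarray:
    # Fixed-width Unicode dtypes ('<U...') are rewritten as object ('O') arrays,
    # mirroring the dtype check in from_numpy_ndarray above; other dtypes pass through.
    if str(arr.dtype).startswith('<U'):
        return np.array(arr, dtype='O')
    return arr
names = np.asarray(["alpha", "beta", "gamma"])   # dtype '<U5'
converted = to_tiledb_writable(names)
print(names.dtype, converted.dtype)              # <U5 object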
497 | 18 | 3,993 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
format_elapsed
| true |
function
| 6 | 12 | false | false |
[
"format_elapsed",
"get_start_stamp",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.
|
498 | 18 | 4,019 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
indent
| true |
statement
| 15 | 15 | false | false |
[
"uri",
"indent",
"ctx",
"verbose",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_numpy_ndarray",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.
|
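Note: the prefixes repeatedly wrap each write in util.get_start_stamp() and util.format_elapsed(s, ...). The real tiledbsc.util implementations are not reproduced in this dump; a minimal stand-in with the same calling convention (helper names reused purely for illustration, not the library's code) could look like:
import time
def get_start_stamp() -> float:
    # Stand-in only: the actual tiledbsc.util helper is not shown in this dump.
    return time.time()
def format_elapsed(start: float, message: str) -> str:
    # Appends a rough elapsed-time suffix to the caller's message.
    return f"{message} TIME {time.time() - start:.3f} seconds"
s = get_start_stamp()
time.sleep(0.05)
print(format_elapsed(s, "FINISH WRITING demo"))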
499 | 18 | 4,066 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
uri
| true |
statement
| 15 | 15 | false | false |
[
"indent",
"uri",
"ctx",
"verbose",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_numpy_ndarray",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.
|
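Note: maybe_from_numpyable_object, shown in full in the prefixes above, dispatches on the input type and wraps scalars in 1-D arrays before writing. A small stand-alone sketch of that dispatch, which returns the coerced array instead of writing it and omits the util._to_tiledb_supported_array_type normalization step, is:
import numpy as np
def coerce_to_1d_or_passthrough(obj):
    # Mirrors the type dispatch in maybe_from_numpyable_object: ndarrays and lists
    # are used as-is, scalars (int/float/str/numpy scalars) are wrapped in a 1-D array.
    if isinstance(obj, np.ndarray):
        return obj
    if isinstance(obj, list):
        return np.asarray(obj)
    if isinstance(obj, np.str_):
        return np.asarray([obj]).astype(str)
    if isinstance(obj, (int, float, str)):
        return np.asarray([obj])
    if 'numpy' in str(type(obj)):
        return np.asarray([obj])
    return None  # caller falls back to other ingest paths
print(coerce_to_1d_or_passthrough(3.14))           # [3.14]
print(coerce_to_1d_or_passthrough(["a", "b"]))     # ['a' 'b']
print(coerce_to_1d_or_passthrough(np.float32(2)))  # [2.]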
500 | 18 | 4,406 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
verbose
| true |
statement
| 15 | 15 | false | false |
[
"verbose",
"from_numpy_ndarray",
"uri",
"ctx",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.
|
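Note: the prefix above ends inside from_scipy_csr, whose docstring says the CSR input is converted to a coo_matrix before ingest. One way to obtain the COO triples that docstring refers to, using only scipy and without creating any TileDB array, is:
import numpy as np
import scipy.sparse
csr = scipy.sparse.csr_matrix(
    np.array([[0.0, 1.0, 0.0],
              [2.0, 0.0, 3.0]])
)
coo = csr.tocoo()        # COO exposes explicit row/col/data vectors for a sparse write
print(csr.shape)         # (2, 3), the nrows, ncols unpacked in from_scipy_csr above
print([(int(r), int(c), float(v)) for r, c, v in zip(coo.row, coo.col, coo.data)])
# [(0, 1, 1.0), (1, 0, 2.0), (1, 2, 3.0)]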
501 | 18 | 4,436 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
get_start_stamp
| true |
function
| 6 | 12 | false | false |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.
|
502 | 18 | 4,480 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
indent
| true |
statement
| 15 | 15 | false | false |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
|
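Note: a usage sketch for UnsArray.from_pandas_dataframe as defined in the prefixes above, assuming the tiledbsc package from this repo is importable, the module path matches the target_file, and a writable local URI; the path and column names are illustrative only:
import pandas as pd
from tiledbsc.uns_array import UnsArray   # module path assumed from target_file above
df = pd.DataFrame({
    "obs_id": ["AAAT", "ACGT", "TTTG"],
    "score": [0.1, 0.7, 0.3],
})
uns = UnsArray(uri="/tmp/uns_demo", name="uns_demo")   # local URI chosen for illustration
uns.from_pandas_dataframe(df)   # wraps tiledb.from_pandas(sparse=True, allows_duplicates=False)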
503 | 18 | 4,530 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
uri
| true |
statement
| 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"ingest_data_from_csr",
"ctx",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.
|
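Note: the jedi_completions fields in these records list candidate names and types at the point where each prefix breaks off. A sketch of how such candidates can be produced with the jedi library (assuming jedi >= 0.17; the snippet completes a numpy attribute rather than the tiledbsc prefixes shown here):
import jedi
source = "import numpy as np\nnp.asar"
script = jedi.Script(source, path="example.py")
completions = script.complete(line=2, column=len("np.asar"))
print([(c.name, c.type) for c in completions][:3])
# e.g. [('asarray', 'function'), ('asarray_chkfinite', 'function'), ...]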
504 | 18 | 4,587 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
exists
| true |
function
| 15 | 15 | false | true |
[
"verbose",
"uri",
"from_numpy_ndarray",
"indent",
"ctx",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.
|
505 | 18 | 4,617 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
verbose
| true |
statement
| 15 | 15 | false | false |
[
"verbose",
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.
|
506 | 18 | 4,656 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/uns_array.py
|
Unknown
|
indent
| true |
statement
| 15 | 15 | false | false |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.
|
507 | 18 | 4,693 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"exists",
"ctx",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.
|
508 | 18 | 4,731 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | infile | create_empty_array_for_csr | true | function | 15 | 15 | false | true |
[
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
|
509 | 18 | 4,805 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | infile | ingest_data_from_csr | true | function | 15 | 15 | false | true |
[
"from_numpy_ndarray",
"uri",
"indent",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.
|
510 | 18 | 4,848 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | verbose | true | statement | 15 | 15 | false | false |
[
"verbose",
"uri",
"from_numpy_ndarray",
"indent",
"ctx",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.
|
511 | 18 | 4,880 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | format_elapsed | true | function | 6 | 12 | false | false |
[
"format_elapsed",
"get_start_stamp",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.
|
512 | 18 | 4,906 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | indent | true | statement | 15 | 15 | false | false |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"ingest_data_from_csr",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.
|
513 | 18 | 4,956 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"ingest_data_from_csr",
"ctx",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.
|
514 | 18 | 5,746 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | ctx | true | statement | 15 | 15 | false | false |
[
"ctx",
"uri",
"indent",
"from_numpy_ndarray",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.
|
515 | 18 | 5,858 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | ctx | true | statement | 15 | 15 | false | false |
[
"ctx",
"uri",
"from_numpy_ndarray",
"to_matrix",
"indent",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.
|
516 | 18 | 6,243 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | ctx | true | statement | 15 | 15 | false | false |
[
"ctx",
"uri",
"indent",
"from_numpy_ndarray",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.
|
517 | 18 | 6,290 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | common | uri | true | statement | 15 | 15 | false | false |
[
"uri",
"ctx",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.
|
518 | 18 | 6,309 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | common | ctx | true | statement | 15 | 15 | false | false |
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
|
519 | 18 | 6,757 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"uri",
"from_numpy_ndarray",
"ctx",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.
|
520 | 18 | 6,781 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | ctx | true | statement | 15 | 15 | false | false |
[
"ctx",
"uri",
"from_numpy_ndarray",
"indent",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.
|
521 | 18 | 7,379 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | verbose | true | statement | 15 | 15 | false | false |
[
"verbose",
"uri",
"ctx",
"indent",
"from_numpy_ndarray",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.
|
522 | 18 | 7,410 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | inproject | get_start_stamp | true | function | 6 | 12 | false | false |
[
"get_start_stamp",
"format_elapsed",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successful tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.
|
523 | 18 | 7,454 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | indent | true | statement | 15 | 15 | false | false |
[
"uri",
"indent",
"ctx",
"from_numpy_ndarray",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.
|
524 | 18 | 7,479 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.
|
525 | 18 | 7,517 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"uri",
"indent",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
with tiledb.open(self.
|
526 | 18 | 7,616 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | verbose | true | statement | 15 | 15 | false | false |
[
"verbose",
"uri",
"indent",
"ctx",
"from_numpy_ndarray",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
with tiledb.open(self.uri) as A:
df = pd.DataFrame(A[:])
retval = df.to_numpy()
if self.
|
527 | 18 | 7,648 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | format_elapsed | true | function | 6 | 12 | false | false |
[
"format_elapsed",
"get_start_stamp",
"_to_tiledb_supported_array_type",
"Optional",
"find_csr_chunk_size",
"get_sort_and_permutation"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
with tiledb.open(self.uri) as A:
df = pd.DataFrame(A[:])
retval = df.to_numpy()
if self.verbose:
print(util.
|
528 | 18 | 7,675 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | indent | true | statement | 15 | 15 | false | false |
[
"uri",
"indent",
"ctx",
"from_numpy_ndarray",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this returns always the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
with tiledb.open(self.uri) as A:
df = pd.DataFrame(A[:])
retval = df.to_numpy()
if self.verbose:
print(util.format_elapsed(s2, f"{self.
|
529 | 18 | 7,700 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/uns_array.py | Unknown | uri | true | statement | 15 | 15 | false | false |
[
"indent",
"uri",
"from_numpy_ndarray",
"ctx",
"verbose",
"__init__",
"create_empty_array_for_csr",
"from_pandas_dataframe",
"from_scipy_csr",
"ingest_data_from_csr",
"maybe_from_numpyable_object",
"to_matrix",
"exists",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "create_empty_array_for_csr",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_numpy_ndarray",
"type": "function"
},
{
"name": "from_pandas_dataframe",
"type": "function"
},
{
"name": "from_scipy_csr",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data_from_csr",
"type": "function"
},
{
"name": "maybe_from_numpyable_object",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
import tiledbsc.util as util
import numpy as np
import scipy
import pandas as pd
from typing import Optional
class UnsArray(TileDBArray):
"""
Holds TileDB storage for an array obtained from the nested `anndata.uns` field.
"""
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
# ----------------------------------------------------------------
def from_pandas_dataframe(self, df: pd.DataFrame):
"""
Ingests an `UnsArray` into TileDB storage, given a pandas.DataFrame.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING PANDAS.DATAFRAME {self.uri}")
tiledb.from_pandas(
uri=self.uri,
dataframe=df,
sparse=True,
allows_duplicates=False,
ctx=self.ctx
)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING PANDAS.DATAFRAME {self.uri}"))
# ----------------------------------------------------------------
def maybe_from_numpyable_object(self, obj) -> bool:
"""
Nominally for ingest of `uns` nested data from anndata objects. Handles scalar or array values
-- the former, by wrapping in a 1D array. Maps to TileDB / tiledb.from_numpy storage semantics,
including UTF-8 handling. Supports dtypes like
"""
if isinstance(obj, np.ndarray):
arr = util._to_tiledb_supported_array_type(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, list):
arr = np.asarray(obj)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, np.str_):
# Needs explicit cast from numpy.str_ to str for tiledb.from_numpy
arr = np.asarray([obj]).astype(str)
self.from_numpy_ndarray(arr)
return True
elif isinstance(obj, int) or isinstance(obj, float) or isinstance(obj, str):
# Nominally this is unit-test data
arr = np.asarray([obj])
self.from_numpy_ndarray(arr)
return True
elif 'numpy' in str(type(obj)):
arr = np.asarray([obj])
arr = util._to_tiledb_supported_array_type(arr)
self.from_numpy_ndarray(arr)
return True
else:
return False
# ----------------------------------------------------------------
def from_numpy_ndarray(self, arr: np.ndarray) -> None:
"""
Writes a numpy.ndarray to a TileDB array, nominally for ingest of `uns` nested data from anndata
objects. Mostly tiledb.from_numpy, but with some necessary handling for data with UTF-8 values.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM NUMPY.NDARRAY {self.uri}")
if 'numpy' in str(type(arr)) and str(arr.dtype).startswith('<U'):
# Note arr.astype('str') does not lead to a successfuly tiledb.from_numpy.
arr = np.array(arr, dtype='O')
# overwrite = False
# if self.exists:
# overwrite = True
# if self.verbose:
# print(f"{self.indent}Re-using existing array {self.uri}")
# tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx, overwrite=overwrite)
# TODO: find the right syntax for update-in-place (tiledb.from_pandas uses `mode`)
tiledb.from_numpy(uri=self.uri, array=arr, ctx=self.ctx)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM NUMPY.NDARRAY {self.uri}"))
# ----------------------------------------------------------------
def from_scipy_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING FROM SCIPY.SPARSE.CSR {self.uri}")
nrows, ncols = csr.shape
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array_for_csr("data", csr.dtype, nrows, ncols)
self.ingest_data_from_csr(csr)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING FROM SCIPY.SPARSE.CSR {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array_for_csr(self, attr_name: str, matrix_dtype: np.dtype, nrows: int, ncols: int):
"""
Create a TileDB 2D sparse array with int dimensions and a single attribute.
Nominally used for uns data.
:param matrix_dtype: datatype of the matrix
:param nrows: number of rows in the matrix
:param ncols: number of columns in the matrix
"""
assert isinstance(attr_name, str)
dom = tiledb.Domain(
tiledb.Dim(name="dim0", domain=(0, nrows-1), dtype="int32", filters=[tiledb.RleFilter()]),
tiledb.Dim(name="dim1", domain=(0, ncols-1), dtype="int32", filters=[tiledb.ZstdFilter()]),
ctx=self.ctx
)
att = tiledb.Attr(attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=100000,
cell_order='row-major',
tile_order='col-major',
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data_from_csr(self, csr: scipy.sparse.csr_matrix):
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param csr: Matrix-like object coercible to a scipy coo_matrix.
"""
mat_coo = scipy.sparse.coo_matrix(csr)
d0 = mat_coo.row
d1 = mat_coo.col
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
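# (Editorial sketch, not in the original source.) The write above is the standard
# scipy COO decomposition: row indices, column indices, and values as three
# parallel 1-D arrays. Independent of TileDB:
#
#   import numpy as np
#   import scipy.sparse
#   csr = scipy.sparse.csr_matrix(np.array([[0, 1], [2, 0]]))
#   coo = scipy.sparse.coo_matrix(csr)
#   # coo.row -> [0, 1], coo.col -> [1, 0], coo.data -> [1, 2]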
# ----------------------------------------------------------------
# TODO: regardless of which matrix type (numpy.ndarray, scipy.sparse.csr_matrix, etc) was
# written in, this always returns the same type on readback. Perhaps at write time we can save a
# metadata tag with the provenance-type of the array, and on readback, try to return the same
# type.
def to_matrix(self):
"""
Reads an uns array from TileDB storage and returns a matrix -- currently, always as numpy.ndarray.
"""
if self.verbose:
s2 = util.get_start_stamp()
print(f"{self.indent}START read {self.uri}")
with tiledb.open(self.uri) as A:
df = pd.DataFrame(A[:])
retval = df.to_numpy()
if self.verbose:
print(util.format_elapsed(s2, f"{self.indent}FINISH read {self.
|
530 | 19 | 180 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
SOMA
| true |
class
| 34 | 35 | false | true |
[
"SOMA",
"soma",
"util",
"None",
"util_ann",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.
|
532 | 19 | 335 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | true |
[
"util",
"soma",
"SOMA",
"util_ann",
"SOMAOptions",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.
|
533 | 19 | 340 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
get_start_stamp
| true |
function
| 10 | 16 | false | true |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.
|
536 | 19 | 469 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"SOMAOptions",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.
|
537 | 19 | 474 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
get_start_stamp
| true |
function
| 10 | 16 | false | false |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.
|
540 | 19 | 649 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.
|
541 | 19 | 654 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
inproject
|
format_elapsed
| true |
function
| 10 | 16 | false | true |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.
|
544 | 19 | 850 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.
|
545 | 19 | 855 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
format_elapsed
| true |
function
| 10 | 16 | false | false |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.
|
547 | 19 | 1,063 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
SOMA
| true |
class
| 34 | 35 | false | false |
[
"SOMA",
"soma",
"util",
"None",
"util_ann",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.
|
549 | 19 | 1,215 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"SOMAOptions",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
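# (Editorial sketch, not in the original source.) A hypothetical call sequence for
# the function above; the SOMA constructor arguments and the io module path are
# assumptions, not taken from this file:
#
#   import tiledbsc
#   import tiledbsc.io
#   soma = tiledbsc.SOMA("path/to/soma")        # URI argument assumed
#   tiledbsc.io.from_h5ad(soma, "input.h5ad")   # writes the TileDB group structure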
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.
|
550 | 19 | 1,220 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
get_start_stamp
| true |
function
| 10 | 16 | false | false |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.
|
553 | 19 | 1,348 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"SOMAOptions",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"soma_options",
"SOMACollection",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.
|
554 | 19 | 1,353 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
get_start_stamp
| true |
function
| 10 | 16 | false | false |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.
|
557 | 19 | 1,534 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
util
| true |
module
| 33 | 35 | false | false |
[
"util",
"soma",
"SOMA",
"util_ann",
"soma_options",
"annotation_matrix",
"annotation_matrix_group",
"annotation_pairwise_matrix_group",
"AnnotationMatrix",
"AnnotationMatrixGroup",
"AnnotationPairwiseMatrixGroup",
"assay_matrix",
"assay_matrix_group",
"AssayMatrix",
"AssayMatrixGroup",
"describe_ann_file",
"raw_group",
"RawGroup",
"show_single_cell_group",
"soma_collection",
"SOMACollection",
"SOMAOptions",
"tiledb_array",
"tiledb_group",
"tiledb_object",
"TileDBArray",
"TileDBGroup",
"TileDBObject",
"uns_array",
"uns_group",
"UnsArray",
"UnsGroup",
"util_tiledb"
] |
[
{
"name": "annotation_dataframe",
"type": "module"
},
{
"name": "annotation_matrix",
"type": "module"
},
{
"name": "annotation_matrix_group",
"type": "module"
},
{
"name": "annotation_pairwise_matrix_group",
"type": "module"
},
{
"name": "AnnotationMatrix",
"type": "class"
},
{
"name": "AnnotationMatrixGroup",
"type": "class"
},
{
"name": "AnnotationPairwiseMatrixGroup",
"type": "class"
},
{
"name": "assay_matrix",
"type": "module"
},
{
"name": "assay_matrix_group",
"type": "module"
},
{
"name": "AssayMatrix",
"type": "class"
},
{
"name": "AssayMatrixGroup",
"type": "class"
},
{
"name": "describe_ann_file",
"type": "function"
},
{
"name": "io",
"type": "module"
},
{
"name": "raw_group",
"type": "module"
},
{
"name": "RawGroup",
"type": "class"
},
{
"name": "show_single_cell_group",
"type": "function"
},
{
"name": "SOMA",
"type": "class"
},
{
"name": "soma",
"type": "module"
},
{
"name": "soma_collection",
"type": "module"
},
{
"name": "soma_options",
"type": "module"
},
{
"name": "SOMACollection",
"type": "class"
},
{
"name": "SOMAOptions",
"type": "class"
},
{
"name": "tiledb_array",
"type": "module"
},
{
"name": "tiledb_group",
"type": "module"
},
{
"name": "tiledb_object",
"type": "module"
},
{
"name": "TileDBArray",
"type": "class"
},
{
"name": "TileDBGroup",
"type": "class"
},
{
"name": "TileDBObject",
"type": "class"
},
{
"name": "uns_array",
"type": "module"
},
{
"name": "uns_group",
"type": "module"
},
{
"name": "UnsArray",
"type": "class"
},
{
"name": "UnsGroup",
"type": "class"
},
{
"name": "util",
"type": "module"
},
{
"name": "util_ann",
"type": "module"
},
{
"name": "util_tiledb",
"type": "module"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.
|
558 | 19 | 1,539 |
single-cell-data__tiledb-soma
|
e64f89e6251f221bd91c49a997d7fb67de3ed349
|
apis/python/src/tiledbsc/io/anndata.py
|
Unknown
|
format_elapsed
| true |
function
| 10 | 16 | false | false |
[
"format_elapsed",
"get_start_stamp",
"List",
"Optional",
"ETATracker",
"_find_csc_chunk_size",
"_find_csr_chunk_size",
"_get_sort_and_permutation",
"_to_tiledb_supported_array_type",
"_X_and_ids_to_coo"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "ETATracker",
"type": "class"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "List",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_find_csc_chunk_size",
"type": "function"
},
{
"name": "_find_csr_chunk_size",
"type": "function"
},
{
"name": "_get_sort_and_permutation",
"type": "function"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "_X_and_ids_to_coo",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledbsc
import tiledbsc.util
import tiledbsc.util_ann
import anndata as ad
# ----------------------------------------------------------------
def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads an .h5ad file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = ad.read_h5ad(input_path)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"{soma._indent}FINISH READING {input_path}"
)
)
from_anndata(soma, anndata)
if soma._verbose:
print(
tiledbsc.util.format_elapsed(
s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}"
)
)
# ----------------------------------------------------------------
def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None:
"""
Reads a 10X file and writes to a TileDB group structure.
"""
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"START SOMA.from_10x {input_path} -> {soma.uri}")
if soma._verbose:
s = tiledbsc.util.get_start_stamp()
print(f"{soma._indent}START READING {input_path}")
anndata = scanpy.read_10x_h5(input_path)
if soma._verbose:
print(
tiledbsc.util.
|