| code (string, 6-947k) | repo_name (string, 5-100) | path (string, 4-226) | language (1 class) | license (15 classes) | size (int64, 6-947k) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import model as gca_model
from google.cloud.aiplatform_v1.types import model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation_slice
from google.cloud.aiplatform_v1.types import operation
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"UploadModelRequest",
"UploadModelOperationMetadata",
"UploadModelResponse",
"GetModelRequest",
"ListModelsRequest",
"ListModelsResponse",
"UpdateModelRequest",
"DeleteModelRequest",
"ExportModelRequest",
"ExportModelOperationMetadata",
"ExportModelResponse",
"GetModelEvaluationRequest",
"ListModelEvaluationsRequest",
"ListModelEvaluationsResponse",
"GetModelEvaluationSliceRequest",
"ListModelEvaluationSlicesRequest",
"ListModelEvaluationSlicesResponse",
},
)
class UploadModelRequest(proto.Message):
r"""Request message for
[ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel].
Attributes:
parent (str):
Required. The resource name of the Location into which to
upload the Model. Format:
``projects/{project}/locations/{location}``
model (google.cloud.aiplatform_v1.types.Model):
Required. The Model to create.
"""
parent = proto.Field(proto.STRING, number=1,)
model = proto.Field(proto.MESSAGE, number=2, message=gca_model.Model,)
class UploadModelOperationMetadata(proto.Message):
r"""Details of
[ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]
operation.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
The common part of the operation metadata.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class UploadModelResponse(proto.Message):
r"""Response message of
[ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]
operation.
Attributes:
model (str):
The name of the uploaded Model resource. Format:
``projects/{project}/locations/{location}/models/{model}``
"""
model = proto.Field(proto.STRING, number=1,)
class GetModelRequest(proto.Message):
r"""Request message for
[ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel].
Attributes:
name (str):
Required. The name of the Model resource. Format:
``projects/{project}/locations/{location}/models/{model}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListModelsRequest(proto.Message):
r"""Request message for
[ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels].
Attributes:
parent (str):
Required. The resource name of the Location to list the
Models from. Format:
``projects/{project}/locations/{location}``
filter (str):
An expression for filtering the results of the request. For
field names both snake_case and camelCase are supported.
- ``model`` supports = and !=. ``model`` represents the
Model ID, i.e. the last segment of the Model's [resource
name][google.cloud.aiplatform.v1.Model.name].
- ``display_name`` supports = and !=
- ``labels`` supports general map functions, that is:
- ``labels.key=value`` - key:value equality
- ``labels.key:*`` or ``labels:key`` - key existence
- A key including a space must be quoted.
``labels."a key"``.
Some examples:
- ``model=1234``
- ``displayName="myDisplayName"``
- ``labels.myKey="myValue"``
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token]
of the previous
[ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
order_by (str):
A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for
descending. Supported fields:
- ``display_name``
- ``create_time``
- ``update_time``
Example: ``display_name, create_time desc``.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
order_by = proto.Field(proto.STRING, number=6,)
class ListModelsResponse(proto.Message):
r"""Response message for
[ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]
Attributes:
models (Sequence[google.cloud.aiplatform_v1.types.Model]):
List of Models in the requested page.
next_page_token (str):
A token to retrieve next page of results. Pass to
[ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
models = proto.RepeatedField(proto.MESSAGE, number=1, message=gca_model.Model,)
next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateModelRequest(proto.Message):
r"""Request message for
[ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel].
Attributes:
model (google.cloud.aiplatform_v1.types.Model):
Required. The Model which replaces the
resource on the server.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. For the
``FieldMask`` definition, see
[google.protobuf.FieldMask][google.protobuf.FieldMask].
"""
model = proto.Field(proto.MESSAGE, number=1, message=gca_model.Model,)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteModelRequest(proto.Message):
r"""Request message for
[ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel].
Attributes:
name (str):
Required. The name of the Model resource to be deleted.
Format:
``projects/{project}/locations/{location}/models/{model}``
"""
name = proto.Field(proto.STRING, number=1,)
class ExportModelRequest(proto.Message):
r"""Request message for
[ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel].
Attributes:
name (str):
Required. The resource name of the Model to
export.
output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig):
Required. The desired output location and
configuration.
"""
class OutputConfig(proto.Message):
r"""Output configuration for the Model export.
Attributes:
export_format_id (str):
The ID of the format in which the Model must be exported.
Each Model lists the [export formats it
supports][google.cloud.aiplatform.v1.Model.supported_export_formats].
If no value is provided here, then the first from the list
of the Model's supported formats is used by default.
artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination):
The Cloud Storage location where the Model artifact is to be
written to. Under the directory given as the destination a
new one with name
"``model-export-<model-display-name>-<timestamp-of-export-call>``",
where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
format, will be created. Inside, the Model and any of its
supporting files will be written. This field should only be
set when the ``exportableContent`` field of the
[Model.supported_export_formats] object contains
``ARTIFACT``.
image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination):
The Google Container Registry or Artifact Registry uri where
the Model container image will be copied to. This field
should only be set when the ``exportableContent`` field of
the [Model.supported_export_formats] object contains
``IMAGE``.
"""
export_format_id = proto.Field(proto.STRING, number=1,)
artifact_destination = proto.Field(
proto.MESSAGE, number=3, message=io.GcsDestination,
)
image_destination = proto.Field(
proto.MESSAGE, number=4, message=io.ContainerRegistryDestination,
)
name = proto.Field(proto.STRING, number=1,)
output_config = proto.Field(proto.MESSAGE, number=2, message=OutputConfig,)
class ExportModelOperationMetadata(proto.Message):
r"""Details of
[ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]
operation.
Attributes:
generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
The common part of the operation metadata.
output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo):
Output only. Information further describing
the output of this Model export.
"""
class OutputInfo(proto.Message):
r"""Further describes the output of the ExportModel. Supplements
[ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig].
Attributes:
artifact_output_uri (str):
Output only. If the Model artifact is being
exported to Google Cloud Storage this is the
full path of the directory created, into which
the Model files are being written to.
image_output_uri (str):
Output only. If the Model image is being
exported to Google Container Registry or
Artifact Registry this is the full path of the
image created.
"""
artifact_output_uri = proto.Field(proto.STRING, number=2,)
image_output_uri = proto.Field(proto.STRING, number=3,)
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
output_info = proto.Field(proto.MESSAGE, number=2, message=OutputInfo,)
class ExportModelResponse(proto.Message):
r"""Response message of
[ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]
operation.
"""
class GetModelEvaluationRequest(proto.Message):
r"""Request message for
[ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation].
Attributes:
name (str):
Required. The name of the ModelEvaluation resource. Format:
``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListModelEvaluationsRequest(proto.Message):
r"""Request message for
[ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations].
Attributes:
parent (str):
Required. The resource name of the Model to list the
ModelEvaluations from. Format:
``projects/{project}/locations/{location}/models/{model}``
filter (str):
The standard list filter.
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token]
of the previous
[ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
class ListModelEvaluationsResponse(proto.Message):
r"""Response message for
[ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations].
Attributes:
model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]):
List of ModelEvaluations in the requested
page.
next_page_token (str):
A token to retrieve next page of results. Pass to
[ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
model_evaluations = proto.RepeatedField(
proto.MESSAGE, number=1, message=model_evaluation.ModelEvaluation,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetModelEvaluationSliceRequest(proto.Message):
r"""Request message for
[ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice].
Attributes:
name (str):
Required. The name of the ModelEvaluationSlice resource.
Format:
``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListModelEvaluationSlicesRequest(proto.Message):
r"""Request message for
[ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices].
Attributes:
parent (str):
Required. The resource name of the ModelEvaluation to list
the ModelEvaluationSlices from. Format:
``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}``
filter (str):
The standard list filter.
- ``slice.dimension`` - for =.
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token]
of the previous
[ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
class ListModelEvaluationSlicesResponse(proto.Message):
r"""Response message for
[ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices].
Attributes:
model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]):
List of ModelEvaluationSlices in the
requested page.
next_page_token (str):
A token to retrieve next page of results. Pass to
[ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
model_evaluation_slices = proto.RepeatedField(
proto.MESSAGE, number=1, message=model_evaluation_slice.ModelEvaluationSlice,
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
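# --- Editorial usage sketch (not part of the generated module) ---
# The request/response classes above are plain proto-plus messages, so they can
# be built with keyword arguments. The project and location below are
# hypothetical placeholders.
if __name__ == "__main__":
    _example_request = ListModelsRequest(
        parent="projects/example-project/locations/us-central1",
        filter='labels.env="test"',
        page_size=50,
        order_by="create_time desc",
    )
    print(_example_request)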
| googleapis/python-aiplatform | google/cloud/aiplatform_v1/types/model_service.py | Python | apache-2.0 | 17,689 |
'''
Created on Jul 18, 2017
@author: I310003
'''
| BlessedAndy/Programming-Foundations-with-Python | Programming Foundations with Python/src/cn/careerwinner/sap/report_scheduler.py | Python | apache-2.0 | 55 |
import os
import shutil
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__).replace('.pyc', '.py'))
def install(dst_folder):
target_folder = os.path.join(dst_folder, 'Base')
sup_folder = os.path.abspath(os.path.join(BASE_FOLDER, 'assets', 'src', 'IMC', 'Base'))
# Running from source tree.
print('*', target_folder)
if os.path.isdir(sup_folder):
shutil.rmtree(target_folder, ignore_errors=True)
shutil.copytree(sup_folder, target_folder)
return
raise RuntimeError("failed to find C++ assets folders")
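# Editorial usage sketch: install() mirrors the bundled C++ "Base" assets into
# <dst_folder>/Base, assuming the module is run from a source tree that still
# contains assets/src/IMC/Base next to this file. The destination below is a
# hypothetical temporary folder.
if __name__ == "__main__":
    import tempfile
    install(tempfile.mkdtemp())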
| oceanscan/imctrans | imctrans/cpp/base.py | Python | apache-2.0 | 560 |
# -*- coding: utf-8 -*-
from couchdb.design import ViewDefinition
from couchdb.http import HTTPError
from time import sleep
from random import randint
import os
def add_index_options(doc):
doc['options'] = {'local_seq': True}
start_date_chronograph = ViewDefinition(
'chronograph',
'start_date',
open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'design_files/start_date.js')).read()
)
def sync_design_chronograph(db):
views = [start_date_chronograph]
ViewDefinition.sync_many(db, views, remove_missing=True, callback=add_index_options)
endDate_view = ViewDefinition(
'auctions',
'by_endDate',
''' function(doc) {
var end = new Date(doc.endDate||doc.stages[0].start).getTime()
emit(end, null);
}
'''
)
startDate_view = ViewDefinition(
'auctions',
'by_startDate',
''' function(doc) {
var start = new Date(doc.stages[0].start).getTime()
emit(start, null);
}
'''
)
PreAnnounce_view = ViewDefinition(
'auctions',
'PreAnnounce',
''' function(doc) {
if ((doc.stages.length - 2) == doc.current_stage){
emit(null, null);
}
}
'''
)
def sync_design(db):
views = [endDate_view, startDate_view, PreAnnounce_view]
for view in views:
view.sync(db)
while True:
design = db.get('_design/auctions')
if not design:
design = {'_id': '_design/auctions'}
validate_doc_update = '''
function(newDoc, oldDoc, userCtx, secObj) {
if (userCtx.roles.indexOf('_admin') !== -1) {
return true;
} else {
throw({forbidden: 'Only valid user may change docs.'});
}
}
'''
start_date_filter = '''function(doc, req) {
var now = new Date();
var start = new Date(((doc.stages||[])[0]||{}).start || '2000');
if (start > now){
return true;
}
return false;
}
'''
if 'validate_doc_update' not in design or \
validate_doc_update != design['validate_doc_update'] or \
start_date_filter != design.get('filters', {}).get('by_startDate'):
design['validate_doc_update'] = validate_doc_update
design['filters'] = design.get('filters', {})
design['filters']['by_startDate'] = start_date_filter
try:
return db.save(design)
except HTTPError:
sleep(randint(0, 2000) / 1000.0)
else:
return
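# Editorial usage sketch (server URL and database name are hypothetical): push
# the view and design documents defined above into a CouchDB database.
if __name__ == "__main__":
    from couchdb import Server
    db = Server("http://127.0.0.1:5984/")["auctions"]
    sync_design(db)
    sync_design_chronograph(db)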
| openprocurement/openprocurement.auction | openprocurement/auction/design.py | Python | apache-2.0 | 2,643 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
from cinderclient import utils
class ListExtResource(base.Resource):
@property
def summary(self):
descr = self.description.strip()
if not descr:
return '??'
lines = descr.split("\n")
if len(lines) == 1:
return lines[0]
else:
return lines[0] + "..."
class ListExtManager(base.Manager):
resource_class = ListExtResource
def show_all(self):
return self._list("/extensions", 'extensions')
@utils.service_type('volumev2')
def do_list_extensions(client, _args):
"""List all the os-api extensions that are available."""
extensions = client.list_extensions.show_all()
fields = ["Name", "Summary", "Alias", "Updated"]
utils.print_list(extensions, fields)
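# Editorial usage sketch: inside the cinder CLI this module is discovered as a
# contrib extension (the shell maps do_list_extensions to a "list-extensions"
# subcommand). A programmatic equivalent, with hypothetical credentials and
# auth URL, would be:
if __name__ == "__main__":
    from cinderclient.v2 import client as cinder_client
    cc = cinder_client.Client("user", "password", "project", "http://keystone:5000/v2.0")
    for ext in ListExtManager(cc).show_all():
        print("%s: %s" % (ext.name, ext.summary))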
| metacloud/python-cinderclient | cinderclient/v2/contrib/list_extensions.py | Python | apache-2.0 | 1,439 |
from numpy import *
from numpy.random import lognormal
from random import *
import math
import time
Sigma1 = 0.27043285
RiskMap = { 'none' : 0.001, 'low' : Sigma1, 'medium' : 2*Sigma1, 'high' : 3*Sigma1, 'very high' : 4*Sigma1 }
BaseFactors = { 50 : 1, 60 : 1.07091495269662, 70 : 1.15236358602526, 80 : 1.25558553883723, 90 : 1.41421363539235 }
RiskExponent = { 'none' : 0, 'low' : 1, 'medium' : 2, 'high' : 3, 'very high' : 4 }
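# Editorial note: Task below treats each estimate as defining the median (p50)
# of a lognormal task duration whose log-space sigma comes from RiskMap. For a
# lognormal with median m and sigma s, mean = m*exp(s^2/2) and mode = m*exp(-s^2),
# which is why a 'mode' estimate is multiplied by exp(s^2) and a 'mean' estimate
# is divided by exp(s^2/2) to recover p50. BaseFactors holds exp(Sigma1*z) for
# the 60th-90th percentiles, so a pXX estimate is divided by
# BaseFactors[XX] ** RiskExponent[risk] to get back to the median.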
class Task:
def __init__(self, estimate, type, risk):
self.estimate = estimate
self.type = type
self.sigma = RiskMap[risk]
if type == 'mode':
self.p50 = estimate * math.exp(self.sigma*self.sigma)
elif type == 'mean':
self.p50 = estimate / math.exp(self.sigma*self.sigma/2)
elif type == 'p50':
self.p50 = estimate
elif type == 'p60':
self.p50 = self.CalcMedian(estimate, 60, risk)
elif type == 'p70':
self.p50 = self.CalcMedian(estimate, 70, risk)
elif type == 'p80':
self.p50 = self.CalcMedian(estimate, 80, risk)
elif type == 'p90':
self.p50 = self.CalcMedian(estimate, 90, risk)
else:
raise ValueError('unknown estimate type')
def CalcMedian(self, estimate, confidence, risk):
return estimate/(BaseFactors[confidence] ** RiskExponent[risk])
def Time(self):
return self.p50 * lognormal(0,self.sigma)
def RunMonteCarlo(trials, tasks):
t = time.time()
times = ([])
n = 0
if trials > 100000:
print "RunMonteCarlo: too many trials", trials
trials = 10000
if trials < 1:
trials = 10000
for x in xrange(trials):
total = 0
for task in tasks:
total += task.Time()
times = append(times,total)
elapsed = time.time() - t
times = sort(times)
N = len(times)
cumprob = [[times[t*N/100], t] for t in range(100)]
sigma = log(times).std()
mode = times[N/2] * exp(-sigma*sigma)
nominal = sum([t.estimate for t in tasks])
pnom = 0.0
for x in xrange(trials):
if times[x] > nominal:
pnom = 1. - (1. * x/trials)
break
results = dict(simtime=elapsed, trials=trials, cumprob=cumprob, mean=times.mean(), mode=mode, std=times.std(), risk=sigma, nominal=nominal, pnom=pnom);
return results
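# Editorial usage sketch: simulate a small three-task schedule and report a few
# summary statistics (task sizes and risk levels below are made up).
if __name__ == "__main__":
    schedule = [Task(5, 'p50', 'low'), Task(3, 'mode', 'medium'), Task(8, 'p80', 'high')]
    r = RunMonteCarlo(10000, schedule)
    print "mean=%.2f mode=%.2f P(exceed nominal)=%.2f" % (r['mean'], r['mode'], r['pnom'])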
| macterra/galton | montecarlo.py | Python | apache-2.0 | 2,516 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import functools
import itertools
import time
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova.compute import api as compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.image import glance
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.imageupload.glance.GlanceStore',
help='Object Store Driver used to handle image uploads.'),
cfg.BoolOpt('xenapi_generate_swap',
default=False,
help='Whether to generate swap '
'(False means fetching it from OVA)'),
cfg.StrOpt('image_activation_file',
default=None,
help=_('JSON file containing image activation configuration')),
cfg.StrOpt('provider',
default='Rackspace',
help=_('Set the provider name. Defaults to "Rackspace".')),
cfg.StrOpt('region',
default=None,
help=_('Region compute host is in')),
cfg.StrOpt('ip_whitelist_file',
default=None,
help=_('File containing a list of IP addresses to whitelist '
'on managed hosts')),
cfg.StrOpt('max_snapshot_size',
default=0,
help=_('Maximum allowed number of bytes (before compression)'
' that may be uploaded during an instance snapshot.'
' A value of zero means there is no limit.')),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_CONFIGDRIVE = '3'
# Note(johngarbutt) HVM guests only support four devices
# until the PV tools activate, when others become available.
# As such, the ephemeral disk is only available once the PV tools load.
# Note(johngarbutt) Currently don't support ISO boot during rescue
# and we must have the ISO visible before the PV drivers start
DEVICE_CD = '1'
class RaxImageActivationConfig(object):
"""Manage RAX image license activation config state."""
def __init__(self):
self._cache = {}
if CONF.image_activation_file:
self._file_path = CONF.find_file(CONF.image_activation_file)
self.reload()
def reload(self):
"""(Re)load config from JSON file
The file is a dict mapping each activation profile idsto
a configuration value.
E.x. file:
{
"1-2-3-4-5": "useful_config_value"
}
"""
def _reload(data):
self._config = jsonutils.loads(data)
utils.read_cached_file(self._file_path, self._cache,
reload_func=_reload)
def get(self, profile_name):
"""Get config values for the given profile name."""
if not CONF.image_activation_file:
return None
self.reload()
return self._config.get(profile_name)
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
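# Editorial examples: cmp_version('0.0.1.10', '0.0.1.9') > 0 (10 > 9 in the last
# field); cmp_version('1.2', '1.2.1') < 0 (equal prefixes, so the shorter
# version loses the length tie-break).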
def make_step_decorator(context, instance, instance_update):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
progress = round(float(step_info['current']) /
step_info['total'] * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
instance_update(context, instance['uuid'], {'progress': progress})
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
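# Editorial usage sketch (names are illustrative, not nova API): the decorator
# counts decorated functions at definition time and reports current/total
# progress each time one of them is called.
#
#     step = make_step_decorator(context, instance, virtapi.instance_update)
#
#     @step
#     def fetch_image():
#         ...
#
#     @step
#     def boot_vm():
#         ...
#
#     fetch_image()   # progress updated to 50
#     boot_vm()       # progress updated to 100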
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
# configs for image license activation:
self._rax_image_activation_config = RaxImageActivationConfig()
msg = _("Importing image upload handler: %s")
LOG.debug(msg % CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
@property
def agent_enabled(self):
return not CONF.xenapi_disable_agent
def _get_agent(self, instance, vm_ref):
if self.agent_enabled:
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
nova_uuids = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
other_config = vm_rec['other_config']
nova_uuid = other_config.get('nova_uuid')
if nova_uuid:
nova_uuids.append(nova_uuid)
return nova_uuids
def confirm_migration(self, migration, instance, network_info):
self._destroy_orig_vm(instance, network_info)
def _destroy_orig_vm(self, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, instance, block_device_info=None):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info)
def _restore_orig_vm_and_cleanup_orphan(self, instance, block_device_info):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
# Check if kernel and ramdisk are external
kernel_file = None
ramdisk_file = None
name_label = instance['name']
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
vm_ref = self._create_vm(context, instance, instance['name'],
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def _create_disks(self, context, instance, name_label, disk_image_type,
image_meta, block_device_info=None):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label,
image_meta.get('id'),
disk_image_type,
block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
vdi['uuid'])
root_vdi = vdis.get('root')
if root_vdi:
self._resize_instance(instance, root_vdi)
return vdis
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if name_label is None:
name_label = instance['name']
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta):
vdis = self._create_disks(context, instance, name_label,
disk_image_type, image_meta,
block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file = None
ramdisk_file = None
if instance['kernel_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['kernel_id'],
vm_utils.ImageType.KERNEL)
kernel_file = vdis['kernel'].get('file')
if instance['ramdisk_id']:
vdis = vm_utils.create_kernel_image(context, self._session,
instance, name_label, instance['ramdisk_id'],
vm_utils.ImageType.RAMDISK)
ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
LOG.debug(_("Removing kernel/ramdisk files from dom0"),
instance=instance)
vm_utils.destroy_kernel_ramdisk(
self._session, kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
@step
def setup_network_step(undo_mgr, vm_ref, vdis):
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
@step
def inject_metadata_step(undo_mgr, vm_ref):
self.inject_instance_metadata(instance, vm_ref)
@step
def inject_provider_data_step(undo_mgr, vm_ref):
self.inject_provider_data(instance, vm_ref, context)
@step
def prepare_security_group_filters_step(undo_mgr):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
self._boot_new_instance(instance, vm_ref, injected_files,
admin_password, image_meta)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
@step
def bdev_set_default_root(undo_mgr):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
bdev_set_default_root(undo_mgr)
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
setup_network_step(undo_mgr, vm_ref, vdis)
inject_metadata_step(undo_mgr, vm_ref)
inject_provider_data_step(undo_mgr, vm_ref)
prepare_security_group_filters_step(undo_mgr)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _create_vm(self, context, instance, name_label, vdis,
disk_image_type, network_info, kernel_file=None,
ramdisk_file=None, rescue=False):
"""Create VM instance."""
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
# NOTE(mikal): file injection only happens if we are _not_ using a
# configdrive.
if not configdrive.required_by(instance):
self.inject_instance_metadata(instance, vm_ref)
self.inject_provider_data(instance, vm_ref, context)
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
rescue):
# Alter the image before VM start for network injection.
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
self.inject_hostname(instance, vm_ref, hostname)
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
# Ensure enough free memory is available
if not vm_utils.ensure_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
mode = self._determine_vm_mode(instance, vdis, disk_image_type)
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
use_pv_kernel = (mode == vm_mode.XEN)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _determine_vm_mode(self, instance, vdis, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
is_pv = False
if 'root' in vdis:
os_type = instance['os_type']
vdi_ref = vdis['root']['ref']
is_pv = vm_utils.determine_is_pv(self._session, vdi_ref,
disk_image_type, os_type)
if is_pv:
return vm_mode.XEN
else:
return vm_mode.HVM
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = flavors.extract_instance_type(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
root_disk_size = instance_type['root_gb']
if root_disk_size > 0:
vm_utils.generate_iso_blank_root_disk(self._session, instance,
vm_ref, DEVICE_ROOT, name_label, root_disk_size)
cd_vdi = vdis.pop('iso')
vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password, image_meta):
"""Boot a new instance and configure it."""
LOG.debug(_('Starting VM'), instance=instance)
self._start(instance, vm_ref)
ctx = nova_context.get_admin_context()
# Wait for boot to finish
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
if self.agent_enabled:
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
for path, contents in injected_files:
agent.inject_file(path, contents)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
# Set VCPU weight
instance_type = flavors.extract_instance_type(instance)
vcpu_weight = instance_type['vcpu_weight']
if vcpu_weight is not None:
LOG.debug(_("Setting VCPU weight"), instance=instance)
self._session.call_xenapi('VM.add_to_VCPUs_params', vm_ref,
'weight', str(vcpu_weight))
# Activate OS (if necessary)
profile = image_meta.get('properties', {}).\
get('rax_activation_profile')
if profile:
LOG.debug(_("RAX Activation Profile: %r"), profile,
instance=instance)
# get matching activation config for this profile:
config = self._rax_image_activation_config.get(profile)
if config:
agent.activate_instance(self._session, instance, vm_ref,
config)
def _get_vm_opaque_ref(self, instance, check_rescue=False):
"""Get xapi OpaqueRef from a db record.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue)
if vm_ref is None:
raise exception.NotFound(_('Could not find VM with name %s') %
instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call a plugin on the
XenServer that will bundle the VHDs together and then push the
bundle. Depending on the configured value of
'xenapi_image_upload_handler', image data may be pushed to
Glance or the specified data store.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
max_size = CONF.max_snapshot_size
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
try:
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id,
max_size)
except self._session.XenAPI.Failure as exc:
_type, _method, error = exc.details[:3]
if error == 'VHDsTooLargeError':
LOG.warn(_("Refusing to create snapshot. Instance size is"
" greater than maximum allowed snapshot size"),
instance=instance)
image_service = glance.get_default_image_service()
image_service.update(context, image_id,
{'status': 'error'})
update_task_state(task_state=None,
expected_state=task_states.IMAGE_UPLOADING)
return
else:
raise
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
locals(), instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the _create_disks step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %(progress)d"), locals(),
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
if not instance['auto_disk_config']:
reason = _('Resize down not allowed without auto_disk_config')
raise exception.ResizeError(reason=reason)
step = make_step_decorator(context, instance,
self._virtapi.instance_update)
@step
def fake_step_to_match_resizing_up():
pass
@step
def rename_and_power_off_vm(undo_mgr):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._apply_orig_vm_name_label(instance, vm_ref)
def restore_orig_vm():
# Do not need to restore block devices; they have not yet been removed
self._restore_orig_vm_and_cleanup_orphan(instance, None)
undo_mgr.undo_with(restore_orig_vm)
@step
def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref):
new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session,
instance, old_vdi_ref, instance_type)
def cleanup_vdi_copy():
vm_utils.destroy_vdi(self._session, new_vdi_ref)
undo_mgr.undo_with(cleanup_vdi_copy)
return new_vdi_ref, new_vdi_uuid
@step
def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid):
self._migrate_vhd(instance, new_vdi_uuid, dest, sr_path, 0)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_vdi_ref)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
undo_mgr = utils.UndoManager()
try:
fake_step_to_match_resizing_up()
rename_and_power_off_vm(undo_mgr)
old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize(
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception as error:
msg = _("_migrate_disk_resizing_down failed. "
"Restoring orig vm due to: %s") % error
LOG.exception(msg, instance=instance)
undo_mgr._rollback()
raise exception.InstanceFaultRollback(error)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
self._apply_orig_vm_name_label(instance, vm_ref)
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def _apply_orig_vm_name_label(self, instance, vm_ref):
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, block_device_info):
"""Copies a VHD from one host machine to another, possibly
resizing filesystem before hand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
resize_down = old_gb > new_gb
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
self._detach_block_devices_from_orig_vm(instance, block_device_info)
# NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _detach_block_devices_from_orig_vm(self, instance, block_device_info):
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
name_label = self._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info, name_label,
mount_device)
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version."""
brand = self._session.product_brand
version = self._session.product_version
# To maintain backwards compatibility. All recent versions
# should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
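# Editorial examples of the branches above: XCP 1.1 or XenServer 5.6 fall back
# to 'VDI.resize_online'; XCP 1.2+ and XenServer 6.x (and newer) use
# 'VDI.resize'.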
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure as exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
        So convert the others to _.
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
def inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
def store_meta(topdir, data_list):
for item in data_list:
key = self._sanitize_xenstore_key(item['key'])
value = item['value'] or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', instance['metadata'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
vm_ref = self._get_vm_opaque_ref(instance)
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
# NOTE(sirp): `block_device_info` is not used, information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
        4. Destroying the actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vifs_for_instance(self, vm_rec):
return [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in self._get_vifs_for_instance(vm_rec):
vif_map[vif['device']] = vif['MAC']
return vif_map
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return snapshot of console."""
# TODO(armando-migliaccio): implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
if instance['vm_state'] == vm_states.RESCUED:
name = '%s-rescue' % instance['name']
vm_ref = vm_utils.lookup(self._session, name)
if vm_ref is None:
# The rescue instance might not be ready at this point.
raise exception.InstanceNotReady(instance_id=instance['uuid'])
else:
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ use http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
def _remove_vif_from_network_info(self, instance, vm_ref, mac):
location = ('vm-data/networking/%s' % mac.replace(':', ''))
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
def _get_highest_vif_device_id(self, vm_rec):
"""Enumerates all the VIFs and gets the next highest device id."""
max_device = -1
for device, vif in self._get_vif_device_map(vm_rec).iteritems():
max_device = max(int(device), max_device)
return max_device + 1
def create_vif_for_instance(self, instance, vif_info, hotplug):
vm_ref = vm_utils.lookup(self._session, instance["name"])
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
device = self._get_highest_vif_device_id(vm_rec)
vif_rec = self.vif_driver.plug(instance, vif_info,
vm_ref=vm_ref, device=device)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
if hotplug:
self._session.call_xenapi('VIF.plug', vif_ref)
return vif_ref
def delete_vif_for_instance(self, instance, vif, hot_unplug):
vm_ref = vm_utils.lookup(self._session, instance["name"])
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
for vif_rec in self._get_vifs_for_instance(vm_rec):
if vif_rec["MAC"] == vif["mac_address"]:
vif_ref = self._session.call_xenapi("VIF.get_by_uuid",
vif_rec["uuid"])
if hot_unplug:
self._session.call_xenapi("VIF.unplug", vif_ref)
self._session.call_xenapi("VIF.destroy", vif_ref)
self._remove_vif_from_network_info(instance, vm_ref,
vif["mac_address"])
return
raise Exception(_("No VIF found for instance %s") % instance["uuid"])
def _create_vifs(self, vm_ref, instance, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %(network_ref)s'),
locals(), instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
locals(), instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled:
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def inject_hostname(self, instance, vm_ref, hostname):
"""Inject the hostname of the instance into the xenstore."""
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def inject_provider_data(self, instance, vm_ref, context):
"""Inject provider data for the instance into the xenstore."""
# Store region and roles
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/provider',
CONF.provider or '')
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/region',
CONF.region or '')
self._add_to_param_xenstore(vm_ref, 'vm-data/provider_data/roles',
jsonutils.dumps(context.roles))
# Now build up the IP whitelist data
location = 'vm-data/provider_data/ip_whitelist'
self._add_to_param_xenstore(vm_ref, location, '')
if CONF.ip_whitelist_file:
idx = 0
with open(CONF.ip_whitelist_file) as f:
for entry in f:
entry = entry.strip()
# Skip blank lines and comments
if not entry or entry[0] == '#':
continue
self._add_to_param_xenstore(vm_ref, '%s/%s' %
(location, idx), entry)
idx += 1
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance=None, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
args = {}
if instance or vm_ref:
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
args['dom_id'] = vm_rec['domid']
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
locals(), instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'), locals(), instance=instance)
return {'returncode': 'error', 'message': err_msg}
return None
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%(hostname)s must be in the same '
'aggregate as the source server')
raise exception.MigrationPreCheckError(reason=reason % locals())
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
msg = _('No suitable network for migrate')
raise exception.MigrationPreCheckError(reason=msg)
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg)
return migrate_data
def _get_iscsi_srs(self, ctxt, instance_ref):
vm_ref = self._get_vm_opaque_ref(instance_ref)
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
iscsi_srs = []
for vbd_ref in vbd_refs:
vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref)
# Check if it's on an iSCSI SR
sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref)
if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi':
iscsi_srs.append(sr_ref)
return iscsi_srs
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
dest_check_data = {}
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data.update(
{"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}})
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return dest_check_data
def _is_xsm_sr_check_relaxed(self):
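        # Cached lookup of the host's 'relax-xsm-sr-check' flag, read via the config_file plugin.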
try:
return self.cached_xsm_sr_relaxed
except AttributeError:
config_value = None
try:
config_value = self._make_plugin_call('config_file',
'get_val',
key='relax-xsm-sr-check')
except Exception as exc:
LOG.exception(exc)
self.cached_xsm_sr_relaxed = config_value == "true"
return self.cached_xsm_sr_relaxed
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
# XAPI must support the relaxed SR check for live migrating with
# iSCSI VBDs
if not self._is_xsm_sr_check_relaxed():
raise exception.MigrationError(_('XAPI supporting '
                                'relax-xsm-sr-check=true required'))
if 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('VM.assert_can_migrate failed')
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None):
"""generate a vdi_map for _call_live_migrate_command."""
if sr_ref is None:
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
# Add destination SR refs for all of the VDIs that we created
# as part of the pre migration callback
if 'pre_live_migration_result' in migrate_data:
pre_migrate_data = migrate_data['pre_live_migration_result']
sr_uuid_map = pre_migrate_data.get('sr_uuid_map', [])
for sr_uuid in sr_uuid_map:
# Source and destination SRs have the same UUID, so get the
# reference for the local SR
sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
vdi_map.update(
self._generate_vdi_map(
sr_uuid_map[sr_uuid], vm_ref, sr_ref))
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
iscsi_srs = self._get_iscsi_srs(context, instance)
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
# Tidy up the iSCSI SRs
for sr_ref in iscsi_srs:
volume_utils.forget_sr(self._session, sr_ref)
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
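        # Report memory (in MiB) for every running/paused VM that carries a nova_uuid.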
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
def attach_block_device_volumes(self, block_device_info):
sr_uuid_map = {}
try:
if block_device_info is not None:
for block_device_map in block_device_info[
'block_device_mapping']:
sr_uuid, _ = self._volumeops.attach_volume(
block_device_map['connection_info'],
None,
block_device_map['mount_device'],
hotplug=False)
sr_ref = self._session.call_xenapi('SR.get_by_uuid',
sr_uuid)
sr_uuid_map[sr_uuid] = sr_ref
except Exception:
with excutils.save_and_reraise_exception():
# Disconnect the volumes we just connected
for sr in sr_uuid_map:
                    volume_utils.forget_sr(self._session, sr_uuid_map[sr])
return sr_uuid_map
|
sridevikoushik31/nova
|
nova/virt/xenapi/vmops.py
|
Python
|
apache-2.0
| 88,390 |
####################################
## batch code for WF simulation
####################################
import sys, array, os, getpass
from subprocess import call
import subprocess as subp
import time
import math as math
from subprocess import Popen, PIPE
if len(sys.argv) < 12:
print("========= Syntax ========")
print("python BatchSimulation.py ....")
print("<Output path (abs.)>")
print("<number of jobs>")
print("<number of events in each job>")
print("<enable PMT after pulses ?(0 for disable)>")
print("<enable S2 after pulses ?(0 for disable)>")
print("<photon number lower>")
print("<photon number upper>")
print("<electron number lower>")
print("<electron number upper>")
print("<If enable S1-S2 correlation (0 for no, 1 for yes)>")
print("<If use Public node (0 for no(xenon1t nodes); 1 for yes; 2 for kicp nodes)>")
exit()
OutputGeneralPath = sys.argv[1]
NumJobs = int(sys.argv[2])
NumEvents = int(sys.argv[3])
PMTAfterpulseFlag = int(sys.argv[4])
S2AfterpulseFlag = int(sys.argv[5])
PhotonNumLower = int(sys.argv[6])
PhotonNumUpper = int(sys.argv[7])
ElectronNumLower = int(sys.argv[8])
ElectronNumUpper = int(sys.argv[9])
IfEnableS1S2Correlation = int(sys.argv[10])
IfUsePublicNodes = int(sys.argv[11])
MaxNumJob = 64
if not IfUsePublicNodes:
MaxNumJob=200
##### Start batching #########
CurrentPath = os.getcwd()
print (CurrentPath)
CurrentUser = getpass.getuser()
for i in range(NumJobs):
RunString = "%06d" % i
# create folder
OutputPath = OutputGeneralPath + "/" + RunString
if os.path.exists(OutputPath):
subp.call("rm -r "+OutputPath, shell=True)
subp.call("mkdir -p "+OutputPath, shell=True)
# define filenames
SubmitFile = OutputPath+"/submit_"+ RunString + ".sh"
SubmitOutputFilename = OutputPath+"/submit_"+ RunString + ".log"
SubmitErrorFilename = OutputPath+"/submit_"+ RunString + ".log"
# create the basic submit
subp.call("echo '#!/bin/bash\n' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --output="+SubmitOutputFilename+"' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --error="+SubmitErrorFilename+"' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --time=03:59:00' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --account=pi-lgrandi' >> "+SubmitFile, shell=True)
if IfUsePublicNodes==0:
subp.call("echo '#SBATCH --qos=xenon1t' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --partition=xenon1t\n' >> "+SubmitFile, shell=True)
elif IfUsePublicNodes==2:
subp.call("echo '#SBATCH --qos=xenon1t-kicp' >> "+SubmitFile, shell=True)
subp.call("echo '#SBATCH --partition=kicp\n' >> "+SubmitFile, shell=True)
Command = CurrentPath+"/./run_fax.sh "+str(PhotonNumLower)+" "+str(PhotonNumUpper)+" "+str(ElectronNumLower)+" "+str(ElectronNumUpper)+" "+str(PMTAfterpulseFlag)+" "+str(S2AfterpulseFlag)+" "+str(NumEvents)+" "+OutputGeneralPath+" "+RunString+" "+str(IfEnableS1S2Correlation)
subp.call("echo '"+Command+"\n' >> "+SubmitFile, shell=True)
SubmitPath = OutputPath
#submit
IfSubmitted=0
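    # Throttle: keep polling squeue until this user has fewer than MaxNumJob jobs queued/running, then sbatch.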
while IfSubmitted==0:
Partition = "sandyb" # public
if not IfUsePublicNodes:
Partition = "xenon1t"
elif IfUsePublicNodes==2:
Partition = "kicp"
p1 = Popen(["squeue","--partition="+Partition, "--user="+CurrentUser], stdout=PIPE)
p2 = Popen(["wc", "-l"], stdin=p1.stdout, stdout=PIPE)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
output = p2.communicate()[0]
Status=subp.call("squeue --partition="+Partition+" --user="+CurrentUser +" | wc -l", shell=True)
Output=int(output)
#print(Status)
print("Current job running number "+str(Output))
if Status==0 and Output<MaxNumJob:
#sbatch it
subp.call("cd "+SubmitPath+";sbatch "+SubmitFile+";cd -", shell=True)
IfSubmitted=1
time.sleep(2.0)
else:
time.sleep(30)
|
XENON1T/processing
|
montecarlo/fax_waveform/MidwayBatch.py
|
Python
|
apache-2.0
| 4,098 |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('studio', '[email protected]'),
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/Users/studio/Sites/joatu-master/database.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
ALLOWED_HOSTS = ['.joatu.azurewebsites.net']
# Make this unique, and don't share it with anybody.
SECRET_KEY = '@h8_wz=yshx96$%%tm$id#96gbllw3je7)%fhx@lja+_c%_(n&'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
('common', '/Users/studio/Sites/joatu-master/static/img/common'),
('css', '/Users/studio/Sites/joatu-master/static/css'),
)
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
STATIC_URL = '/static/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/Users/studio/Sites/joatu-master/media/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/Users/studio/Sites/joatu-master/templates',
)
|
mdblv2/joatu-django
|
application/joatu/local_settings.py
|
Python
|
apache-2.0
| 2,019 |
import jumon
def main():
jumon.entry(__name__)
|
TakesxiSximada/syaml
|
src/syaml/commands/__init__.py
|
Python
|
apache-2.0
| 53 |
import commands
import time
import MySQLdb
from locust import Locust, events, task, TaskSet
def show_tables(self):
print "Running show_tables..."
print self.client.query("SHOW TABLES IN mysql", name="SHOW TABLES")
def mysql_user(self):
print "Running show users..."
print self.client.query("SELECT * FROM mysql.user", name="SHOW USERS")
def city_select(self):
print "City SELECT..."
query = "SELECT * FROM City"
name = "City SELECT"
print "%s: %s" %(self.id, len(self.client.query(query, name)) )
def country_select(self):
print "Country SELECT..."
query = "SELECT * FROM Country"
name = "Country SELECT"
print "%s: %s" %(self.id, len(self.client.query(query, name)) )
def two_table_join(self):
print "2 table JOIN..."
query = "SELECT * FROM City, Country WHERE City.CountryCode=Country.Code"
name = "two_table_join"
print "%s: %s" %(self.id, len(self.client.query(query, name)) )
def three_table_join(self):
print "3 table JOIN..."
query = "SELECT * FROM City, Country, CountryLanguage WHERE City.CountryCode=Country.Code AND CountryLanguage.CountryCode=Country.Code"
name = "three_table_join"
print "%s: %s" %(self.id, len(self.client.query(query, name)) )
class MariadbClient():
"""
    Simple, sample MariaDB client implementation that shells out to the mysql
    command-line client and fires locust events on request_success and
    request_failure, so that all requests get tracked in locust's statistics.
"""
def __init__(self):
try:
print 'Hello!'
except Exception as e:
print Exception, e
def query(self, query, name):
start_time = time.time()
try:
cmd = 'mysql -uroot world -e "%s"' %query
status, output = commands.getstatusoutput(cmd)
print "%s\ncmd: %s\nstatus: %s\n\n%s" %('#'*80, cmd, status, '#'*80)
except Exception as e:
total_time = float((time.time() - start_time) * 1000)
print Exception, e
events.request_failure.fire(request_type="mariadb", name=name, response_time=total_time, exception=e)
return None
else:
total_time = float((time.time() - start_time) * 1000)
events.request_success.fire(request_type="mariadb", name=name, response_time=total_time, response_length=0)
# In this example, I've hardcoded response_length=0. If we would want the response length to be
# reported correctly in the statistics, we would probably need to hook in at a lower level
return output
class Task_set(TaskSet):
def on_start(self):
""" on_start is called when a Locust start before any task is scheduled """
self.id = str(self.locust).split('object at')[1].strip().replace('>','')
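    # Relative task weights: e.g. three_table_join is scheduled about 10x as often as country_select.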
tasks = {three_table_join: 10,
two_table_join: 5,
city_select: 3,
country_select: 1
}
class MariadbLocust(Locust):
"""
    This is the abstract Locust class which should be subclassed. It provides a MariaDB client
    that can be used to run queries that will be tracked in Locust's statistics.
"""
def __init__(self, *args, **kwargs):
super(MariadbLocust, self).__init__(*args, **kwargs)
self.client = MariadbClient()
task_set = Task_set
class ApiUser(MariadbLocust):
def __init__(self):
super(ApiUser, self).__init__()
self.host = "http://127.0.0.1:3306/"
self.min_wait = 0
self.max_wait = 10
task_set = Task_set
|
pcrews/rannsaka
|
mysql/mysql_demo.py
|
Python
|
apache-2.0
| 3,820 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
# Lint as: python3
"""Filters an access trace.
Given a CSV file containing (pc, address) in hex, filters the file to only
include the desired cache set accesses and splits the resulting trace into
train (80%) / valid (10%) / test (10%).
Example usage:
Suppose that the access trace exists at /path/to/file.csv
Results in the following three files: train.csv, valid.csv, test.csv.
python3 filter.py /path/to/file.csv
"""
import argparse
import csv
import os
import subprocess
import numpy as np
import tqdm
if __name__ == "__main__":
# The cache sets used in the paper:
# An Imitation Learning Approach to Cache Replacement
PAPER_CACHE_SETS = [6, 35, 38, 53, 67, 70, 113, 143, 157, 196, 287, 324, 332,
348, 362, 398, 406, 456, 458, 488, 497, 499, 558, 611,
718, 725, 754, 775, 793, 822, 862, 895, 928, 1062, 1086,
1101, 1102, 1137, 1144, 1175, 1210, 1211, 1223, 1237,
1268, 1308, 1342, 1348, 1353, 1424, 1437, 1456, 1574,
1599, 1604, 1662, 1683, 1782, 1789, 1812, 1905, 1940,
1967, 1973]
parser = argparse.ArgumentParser()
parser.add_argument(
"access_trace_filename", help="Local path to the access trace to filter.")
parser.add_argument(
"-s", "--cache_sets", default=PAPER_CACHE_SETS,
help=("Specifies which cache sets to keep. Defaults to the 64 sets used"
" in the paper."))
parser.add_argument(
"-a", "--associativity", default=16,
help="Associativity of the cache.")
parser.add_argument(
"-c", "--capacity", default=2 * 1024 * 1024,
help="Capacity of the cache.")
parser.add_argument(
"-l", "--cache_line_size", default=64,
help="Size of the cache lines in bytes.")
parser.add_argument(
"-b", "--batch_size", default=32,
help=("Ensures that train.csv, valid.csv, and test.csv contain a number"
" of accesses that is a multiple of this. Use 1 to avoid this."))
args = parser.parse_args()
PREFIX = "_filter_traces"
output_filenames = ["train.csv", "valid.csv", "test.csv", "all.csv"]
output_filenames += [PREFIX + str(i) for i in range(10)]
for output_filename in output_filenames:
if os.path.exists(output_filename):
raise ValueError(f"File {output_filename} already exists.")
num_cache_lines = args.capacity // args.cache_line_size
num_sets = num_cache_lines // args.associativity
cache_bits = int(np.log2(args.cache_line_size))
set_bits = int(np.log2(num_sets))
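  # An access maps to the cache set given by the low set_bits bits of its cache-line-aligned address.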
num_lines = 0
accepted_cache_sets = set(args.cache_sets)
with open("all.csv", "w") as write:
with open(args.access_trace_filename, "r") as read:
for pc, address in tqdm.tqdm(csv.reader(read)):
pc = int(pc, 16)
address = int(address, 16)
aligned_address = address >> cache_bits
set_id = aligned_address & ((1 << set_bits) - 1)
if set_id in accepted_cache_sets:
num_lines += 1
write.write(f"0x{pc:x},0x{address:x}\n")
split_length = num_lines // 10
# Make split_length a multiple of batch_size
split_length = split_length // args.batch_size * args.batch_size
cmd = f"split -l {split_length} --numeric-suffixes all.csv {PREFIX}"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
# Removes the extra accesses that don't fit into batch_size multiples.
cmd = f"wc -l {PREFIX}10"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
cmd = f"rm {PREFIX}10"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
# Last split is test
# Second last split is valid
# First 8 splits are train
cmd = f"mv {PREFIX}09 test.csv"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
cmd = f"mv {PREFIX}08 valid.csv"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
cmd = f"cat {PREFIX}0[0-7] > train.csv"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
# Clean up
cmd = f"rm {PREFIX}0[0-7]"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
cmd = "rm all.csv"
print(cmd)
subprocess.run(cmd, check=True, shell=True)
|
google-research/google-research
|
cache_replacement/policy_learning/cache/traces/train_test_split.py
|
Python
|
apache-2.0
| 4,756 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
t = int(input())
for _ in range(t):
n_A = int(input())
set_A = set(map(int, input().split()))
n_B = int(input())
set_B = set(map(int, input().split()))
print(set_A.issubset(set_B))
|
MithileshCParab/HackerRank-10DaysOfStatistics
|
Python/Sets/check_subset.py
|
Python
|
apache-2.0
| 287 |
# Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
amqp1_opts = [
cfg.StrOpt('server_request_prefix',
default='exclusive',
deprecated_group='amqp1',
help="address prefix used when sending to a specific server"),
cfg.StrOpt('broadcast_prefix',
default='broadcast',
deprecated_group='amqp1',
help="address prefix used when broadcasting to all servers"),
cfg.StrOpt('group_request_prefix',
default='unicast',
deprecated_group='amqp1',
help="address prefix when sending to any server in group"),
cfg.StrOpt('container_name',
default=None,
deprecated_group='amqp1',
help='Name for the AMQP container'),
cfg.IntOpt('idle_timeout',
default=0, # disabled
deprecated_group='amqp1',
help='Timeout for inactive connections (in seconds)'),
cfg.BoolOpt('trace',
default=False,
deprecated_group='amqp1',
help='Debug: dump AMQP frames to stdout'),
cfg.StrOpt('ssl_ca_file',
default='',
deprecated_group='amqp1',
help="CA certificate PEM file to verify server certificate"),
cfg.StrOpt('ssl_cert_file',
default='',
deprecated_group='amqp1',
help='Identifying certificate PEM file to present to clients'),
cfg.StrOpt('ssl_key_file',
default='',
deprecated_group='amqp1',
help='Private key PEM file used to sign cert_file certificate'),
cfg.StrOpt('ssl_key_password',
default=None,
deprecated_group='amqp1',
help='Password for decrypting ssl_key_file (if encrypted)'),
cfg.BoolOpt('allow_insecure_clients',
default=False,
deprecated_group='amqp1',
help='Accept clients using either SSL or plain TCP'),
cfg.StrOpt('sasl_mechanisms',
default='',
deprecated_group='amqp1',
help='Space separated list of acceptable SASL mechanisms'),
cfg.StrOpt('sasl_config_dir',
default='',
deprecated_group='amqp1',
help='Path to directory that contains the SASL configuration'),
cfg.StrOpt('sasl_config_name',
default='',
deprecated_group='amqp1',
help='Name of configuration file (without .conf suffix)'),
cfg.StrOpt('username',
default='',
deprecated_group='amqp1',
help='User name for message broker authentication'),
cfg.StrOpt('password',
default='',
deprecated_group='amqp1',
help='Password for message broker authentication')
]
|
magic0704/oslo.messaging
|
oslo_messaging/_drivers/protocols/amqp/opts.py
|
Python
|
apache-2.0
| 3,495 |
#!/usr/bin/env python
#
# Copyright 2015 Ravello Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected]'
from ravellobmc import RavelloBmc
from ravellobmc import IPMI_PORT
from ravello_sdk import message_queue
import subprocess
import threading
import time
import unittest
my_bmc = None
my_thread = None
def start_bmc():
global my_bmc
my_bmc = RavelloBmc({'admin': 'password'},
port=IPMI_PORT,
address='127.0.0.1',
aspect="design",
username='ravello_user',
password='ravello_pass',
app_name='foo',
vm_name='bar')
my_bmc.connect()
my_bmc.listen()
def is_listening():
cmd = subprocess.Popen('netstat -u -p -l',
shell=True,
stdout=subprocess.PIPE)
output = cmd.communicate()[0]
pattern = 'localhost:asf-rmcp'
if pattern in output:
return True
return False
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("setup")
global my_thread
my_thread = threading.Thread(target=start_bmc)
my_thread.start()
# We won't modify pyghmi too much by adding a condition variable
# linked to the bmc.BMC listen method so we just use a bounded sleep
while not is_listening():
time.sleep(0.1)
@classmethod
def tearDownClass(cls):
        global my_thread
# Again we do not want to modify pyghmi too much so use this internal
# threading call to stop the BMC thread
my_thread._Thread__stop()
my_thread.join()
def ipmi(self, cmd):
return subprocess.call(['ipmitool',
'-I',
'lanplus',
'-U',
'admin',
'-P',
'password',
'-H',
'127.0.0.1',
'power',
cmd])
def test_01_init(self):
assert(message_queue.pop(0) == 'login: ravello_user, ravello_pass')
assert(message_queue.pop(0) == 'get_application: foo design')
def test_02_poweroff(self):
result = self.ipmi('off')
assert(result == 0)
assert(message_queue.pop(0) == 'poweroff_vm')
def test_03_poweron(self):
result = self.ipmi('on')
assert(result == 0)
assert(message_queue.pop(0) == 'start_vm')
def test_04_shutdown(self):
result = self.ipmi('soft')
assert(result == 0)
assert(message_queue.pop(0) == 'stop_vm')
def test_05_powerstate(self):
result = self.ipmi('status')
assert(result == 0)
assert(message_queue.pop(0) == 'get_application: foo design')
if __name__ == '__main__':
unittest.main()
|
benoit-canet/ravellobmc
|
test.py
|
Python
|
apache-2.0
| 3,544 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1beta1FlowSchemaSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'distinguisher_method': 'V1beta1FlowDistinguisherMethod',
'matching_precedence': 'int',
'priority_level_configuration': 'V1beta1PriorityLevelConfigurationReference',
'rules': 'list[V1beta1PolicyRulesWithSubjects]'
}
attribute_map = {
'distinguisher_method': 'distinguisherMethod',
'matching_precedence': 'matchingPrecedence',
'priority_level_configuration': 'priorityLevelConfiguration',
'rules': 'rules'
}
def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None): # noqa: E501
"""V1beta1FlowSchemaSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._distinguisher_method = None
self._matching_precedence = None
self._priority_level_configuration = None
self._rules = None
self.discriminator = None
if distinguisher_method is not None:
self.distinguisher_method = distinguisher_method
if matching_precedence is not None:
self.matching_precedence = matching_precedence
self.priority_level_configuration = priority_level_configuration
if rules is not None:
self.rules = rules
@property
def distinguisher_method(self):
"""Gets the distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:return: The distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: V1beta1FlowDistinguisherMethod
"""
return self._distinguisher_method
@distinguisher_method.setter
def distinguisher_method(self, distinguisher_method):
"""Sets the distinguisher_method of this V1beta1FlowSchemaSpec.
:param distinguisher_method: The distinguisher_method of this V1beta1FlowSchemaSpec. # noqa: E501
:type: V1beta1FlowDistinguisherMethod
"""
self._distinguisher_method = distinguisher_method
@property
def matching_precedence(self):
"""Gets the matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
:return: The matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: int
"""
return self._matching_precedence
@matching_precedence.setter
def matching_precedence(self, matching_precedence):
"""Sets the matching_precedence of this V1beta1FlowSchemaSpec.
`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501
:param matching_precedence: The matching_precedence of this V1beta1FlowSchemaSpec. # noqa: E501
:type: int
"""
self._matching_precedence = matching_precedence
@property
def priority_level_configuration(self):
"""Gets the priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:return: The priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: V1beta1PriorityLevelConfigurationReference
"""
return self._priority_level_configuration
@priority_level_configuration.setter
def priority_level_configuration(self, priority_level_configuration):
"""Sets the priority_level_configuration of this V1beta1FlowSchemaSpec.
:param priority_level_configuration: The priority_level_configuration of this V1beta1FlowSchemaSpec. # noqa: E501
:type: V1beta1PriorityLevelConfigurationReference
"""
if self.local_vars_configuration.client_side_validation and priority_level_configuration is None: # noqa: E501
raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`") # noqa: E501
self._priority_level_configuration = priority_level_configuration
@property
def rules(self):
"""Gets the rules of this V1beta1FlowSchemaSpec. # noqa: E501
`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
:return: The rules of this V1beta1FlowSchemaSpec. # noqa: E501
:rtype: list[V1beta1PolicyRulesWithSubjects]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1beta1FlowSchemaSpec.
`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501
:param rules: The rules of this V1beta1FlowSchemaSpec. # noqa: E501
:type: list[V1beta1PolicyRulesWithSubjects]
"""
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1FlowSchemaSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1FlowSchemaSpec):
return True
return self.to_dict() != other.to_dict()
|
kubernetes-client/python
|
kubernetes/client/models/v1beta1_flow_schema_spec.py
|
Python
|
apache-2.0
| 8,042 |
class Alarm(object):
id = ''
uid = 0
note_id = ''
date = 0
update_date = 0
is_deleted = 0
#note = None
|
ThinkmanWang/NotesServer
|
models/Alarm.py
|
Python
|
apache-2.0
| 151 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from polyaxon.polyboard.artifacts import V1ArtifactKind, V1RunArtifact
from polyaxon.utils.fqn_utils import to_fqn_name
def collect_lineage_artifacts_path(artifact_path: str) -> Optional[V1RunArtifact]:
name = os.path.basename(artifact_path)
return V1RunArtifact(
name=to_fqn_name(name),
kind=V1ArtifactKind.DIR,
path=artifact_path,
summary={"path": artifact_path},
is_input=True,
)
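# Illustrative call, shown as a comment only; it assumes to_fqn_name leaves a
# plain directory name such as "outputs" unchanged:
#
#   artifact = collect_lineage_artifacts_path("/plx/artifacts/outputs")
#   # -> V1RunArtifact(name="outputs", kind=V1ArtifactKind.DIR,
#   #                  path="/plx/artifacts/outputs",
#   #                  summary={"path": "/plx/artifacts/outputs"}, is_input=True)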
|
polyaxon/polyaxon
|
core/polyaxon/polypod/compiler/lineage/artifacts_collector.py
|
Python
|
apache-2.0
| 1,081 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2018-12-04 22:15
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
def create_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.new_id = uuid.uuid4()
m.save()
def remove_ids(apps, schema_editor):
Launch = apps.get_model('api', 'Launch')
for m in Launch.objects.all():
m.uuid = None
m.save()
class Migration(migrations.Migration):
dependencies = [
('api', '0019_auto_20181206_0135'),
]
operations = [
migrations.AddField(
model_name='launch',
name='new_id',
field=models.UUIDField(default=uuid.uuid4),
),
migrations.RunPython(code=create_ids, reverse_code=remove_ids),
]
|
ItsCalebJones/SpaceLaunchNow-Server
|
api/migrations/0020_launch_new_id.py
|
Python
|
apache-2.0
| 860 |
def bfs(node, target, comparator=lambda x, y: x == y):
queue = [node]
visited_nodes = []
while len(queue) != 0:
current_node = queue.pop(0)
if current_node not in visited_nodes:
if comparator(current_node.value, target):
return current_node
queue.extend(current_node)
visited_nodes.append(current_node)
return None
def dfs(node, target, comparator=lambda x, y: x == y):
queue = [node]
visited_nodes = []
while len(queue) != 0:
current_node = queue.pop()
if current_node not in visited_nodes:
if comparator(current_node.value, target):
return current_node
queue.extend(current_node)
visited_nodes.append(current_node)
return None
def dls(node, target, limit, comparator=lambda x, y: x == y):
queue = [(node, 0)]
visited_nodes = []
max_level = 0
while len(queue) != 0:
current_node, current_node_level = queue.pop()
max_level = max(max_level, current_node_level)
if current_node_level <= limit and current_node not in visited_nodes:
if comparator(current_node.value, target):
return current_node, current_node_level
if current_node_level < limit:
queue.extend([(child, current_node_level + 1) for child in current_node])
visited_nodes.append(current_node)
return None, max_level
def iterative_deepening_search(node, target, comparator=lambda x, y: x == y):
level = 0
found_level = 0
while level == found_level:
level += 1
result, found_level = dls(node, target, level, comparator)
if result is not None:
return result
return None
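# Usage sketch (an assumption for illustration: a "node" is any object with a
# .value attribute that is also iterable over its child nodes, which is how the
# functions above expand the frontier via queue.extend(current_node)).
#
#   class TreeNode:
#       def __init__(self, value, children=None):
#           self.value = value
#           self.children = children or []
#       def __iter__(self):
#           return iter(self.children)
#
#   root = TreeNode(1, [TreeNode(2), TreeNode(3, [TreeNode(4)])])
#   bfs(root, 4)                        # FIFO queue: breadth-first
#   dfs(root, 4)                        # list popped from the end, i.e. a LIFO stack
#   dls(root, 4, limit=2)               # returns (node_or_None, max_level_reached)
#   iterative_deepening_search(root, 4) # repeated dls with a growing limit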
|
ocozalp/Algorithms
|
search/uninformed_search.py
|
Python
|
apache-2.0
| 1,797 |
# -*- coding: utf-8 -*-
"""
Sphinx plugins for Django documentation.
"""
import os
import re
from docutils import nodes, transforms
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s; field lookup type",
)
app.add_description_unit(
directivename = "django-admin",
rolename = "djadmin",
indextemplate = "pair: %s; django-admin command",
parse_node = parse_django_admin_node,
)
app.add_description_unit(
directivename = "django-admin-option",
rolename = "djadminopt",
indextemplate = "pair: %s; django-admin command-line option",
parse_node = parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
arg0 = self.arguments[0]
is_nextversion = env.config.django_next_version == arg0
ret = []
node = addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(self.arguments) == 1:
linktext = u'Пожалуйста, обратитесь к описанию релиза </releases/%s>' % (arg0)  # "Please refer to the release notes </releases/%s>"
xrefs = roles.XRefRole()('doc', linktext, linktext, self.lineno, self.state)
node.extend(xrefs[0])
node['version'] = arg0
else:
node['version'] = "Development version"
node['type'] = self.name
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1], self.lineno+1)
node.extend(inodes)
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
# <big>? Really?
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
# which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
# that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': u'Устарело в Django %s',  # "Deprecated in Django %s"
'versionchanged': u'Изменено в Django %s',  # "Changed in Django %s"
'versionadded': u'Добавлено в Django %s',  # "Added in Django %s"
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
len(node) and ":" or "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
if json is None:
self.warn("cannot create templatebuiltins.js due to missing simplejson dependency")
return
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
f = open(outfilename, 'wb')
f.write('var django_template_builtins = ')
json.dump(templatebuiltins, f)
f.write(';\n')
f.close()
|
RaD/django-south
|
docs/djbook/_ext/djbookdocs.py
|
Python
|
apache-2.0
| 7,983 |
##########################################################################
#
# Processor specific code
# CPU = "Z80"
# Description = "Zilog 8-bit microprocessor."
# DataWidth = 8 # 8-bit data
# AddressWidth = 16 # 16-bit addresses
# Maximum length of an instruction (for formatting purposes)
maxLength = 4
# Leadin bytes for multibyte instructions
leadInBytes = [0xcb, 0xdd, 0xed, 0xfd]
# Addressing mode table
# List of addressing modes and corresponding format strings for operands.
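# For example, "a,n" : "a,${0:02X}" is a str.format() template: operand byte 0
# is rendered as two hex digits, so "ld a,n" with operand 0x3C prints as
# "ld a,$3C", and 16-bit operands use "${1:02X}{0:02X}" (low byte fetched
# first, printed high byte first). Descriptive note only; the formatting itself
# is performed by the disassembler that loads this table.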
addressModeTable = {
"implied" : "",
"0" : "0",
"0,a" : "0,a",
"0,b" : "0,b",
"0,c" : "0,c",
"0,d" : "0,d",
"0,e" : "0,e",
"0,h" : "0,h",
"0,indhl" : "0,(hl)",
"0,l" : "0,l",
"00" : "$00",
"08" : "$08",
"1" : "1",
"1,a" : "1,a",
"1,b" : "1,b",
"1,c" : "1,c",
"1,d" : "1,d",
"1,e" : "1,e",
"1,h" : "1,h",
"1,indhl" : "1,(hl)",
"1,l" : "1,l",
"10" : "$10",
"18" : "$18",
"2" : "2",
"2,a" : "2,a",
"2,b" : "2,b",
"2,c" : "2,c",
"2,d" : "2,d",
"2,e" : "2,e",
"2,h" : "2,h",
"2,indhl" : "2,(hl)",
"2,l" : "2,l",
"20" : "$20",
"28" : "$28",
"3,a" : "3,a",
"3,b" : "3,b",
"3,c" : "3,c",
"3,d" : "3,d",
"3,e" : "3,e",
"3,h" : "3,h",
"3,indhl" : "3,(hl)",
"3,l" : "3,l",
"30" : "$30",
"38" : "$38",
"4,a" : "4,a",
"4,b" : "4,b",
"4,c" : "4,c",
"4,d" : "4,d",
"4,e" : "4,e",
"4,h" : "4,h",
"4,indhl" : "4,(hl)",
"4,l" : "4,l",
"5,a" : "5,a",
"5,b" : "5,b",
"5,c" : "5,c",
"5,d" : "5,d",
"5,e" : "5,e",
"5,h" : "5,h",
"5,indhl" : "5,(hl)",
"5,l" : "5,l",
"6,a" : "6,a",
"6,b" : "6,b",
"6,c" : "6,c",
"6,d" : "6,d",
"6,e" : "6,e",
"6,h" : "6,h",
"6,indhl" : "6,(hl)",
"6,l" : "6,l",
"7,a" : "7,a",
"7,b" : "7,b",
"7,c" : "7,c",
"7,d" : "7,d",
"7,e" : "7,e",
"7,h" : "7,h",
"7,indhl" : "7,(hl)",
"7,l" : "7,l",
"a" : "a",
"a,a" : "a,a",
"a,b" : "a,b",
"a,c" : "a,c",
"a,d" : "a,d",
"a,e" : "a,e",
"a,h" : "a,h",
"a,i" : "a,i",
"a,indbc" : "a,(bc)",
"a,indc" : "a,(c)",
"a,indde" : "a,(de)",
"a,indhl" : "a,(hl)",
"a,indix+d" : "a,(ix+${0:02X})",
"a,indiy+d" : "a,(iy+${0:02X})",
"a,indn" : "a,(${0:02X})",
"a,indnn" : "a,(${1:02X}{0:02X})",
"a,l" : "a,l",
"a,n" : "a,${0:02X}",
"a,r" : "a,r",
"af" : "af",
"af,af'" : "af,af'",
"b" : "b",
"b,a" : "b,a",
"b,b" : "b,b",
"b,c" : "b,c",
"b,d" : "b,d",
"b,e" : "b,e",
"b,h" : "b,h",
"b,indc" : "b,(c)",
"b,indhl" : "b,(hl)",
"b,indix+d" : "b,(ix+${0:02X})",
"b,indiy+d" : "b,(iy+${0:02X})",
"b,l" : "b,l",
"b,n" : "b,${0:02X}",
"bc" : "bc",
"bc,indaa" : "bc,(${1:02X}{0:02X})",
"bc,nn" : "bc,${1:02X}{0:02X}",
"c" : "c",
"c,a" : "c,a",
"c,b" : "c,b",
"c,c" : "c,c",
"c,d" : "c,d",
"c,e" : "c,e",
"c,h" : "c,h",
"c,indc" : "c,(c)",
"c,indhl" : "c,(hl)",
"c,indix+d" : "c,(ix+${0:02X})",
"c,indiy+d" : "c,(iy+${0:02X})",
"c,l" : "c,l",
"c,n" : "c,${0:02X}",
"c,pcr" : "c,${0:04X}",
"c,nn" : "c,${1:02X}{0:02X}",
"d" : "d",
"d,a" : "d,a",
"d,b" : "d,b",
"d,c" : "d,c",
"d,d" : "d,d",
"d,e" : "d,e",
"d,h" : "d,h",
"d,indc" : "d,(c)",
"d,indhl" : "d,(hl)",
"d,indix+d" : "d,(ix+${0:02X})",
"d,indiy+d" : "d,(iy+${0:02X})",
"d,l" : "d,l",
"d,n" : "d,${0:02X}",
"de" : "de",
"de,hl" : "de,hl",
"de,indaa" : "de,(${1:02X}{0:02X})",
"de,nn" : "de,${1:02X}{0:02X}",
"e" : "e",
"e,a" : "e,a",
"e,b" : "e,b",
"e,c" : "e,c",
"e,d" : "e,d",
"e,e" : "e,e",
"e,h" : "e,h",
"e,indc" : "e,(c)",
"e,indhl" : "e,(hl)",
"e,indix+d" : "e,(ix+${0:02X})",
"e,indiy+d" : "e,(iy+${0:02X})",
"e,l" : "e,l",
"e,n" : "e,${0:02X}",
"h" : "h",
"h,a" : "h,a",
"h,b" : "h,b",
"h,c" : "h,c",
"h,d" : "h,d",
"h,e" : "h,e",
"h,h" : "h,h",
"h,indc" : "h,(c)",
"h,indhl" : "h,(hl)",
"h,indix+d" : "h,(ix+${0:02X})",
"h,indiy+d" : "h,(iy+${0:02X})",
"h,l" : "h,l",
"h,n" : "h,${0:02X}",
"hl" : "hl",
"hl,bc" : "hl,bc",
"hl,de" : "hl,de",
"hl,hl" : "hl,hl",
"hl,indnn" : "hl,(${1:02X}{0:02X})",
"hl,nn" : "hl,${1:02X}{0:02X}",
"hl,sp" : "hl,sp",
"i,a" : "i,a",
"indaa,bc" : "(${1:02X}{0:02X}),bc",
"indaa,de" : "(${1:02X}{0:02X}),de",
"indaa,ix" : "(${1:02X}{0:02X}),ix",
"indaa,iy" : "(${1:02X}{0:02X}),iy",
"indaa,sp" : "(${1:02X}{0:02X}),sp",
"indbc,a" : "(bc),a",
"indc,a" : "(c),a",
"indc,b" : "(c),b",
"indc,c" : "(c),c",
"indc,d" : "(c),d",
"indc,e" : "(c),e",
"indc,h" : "(c),h",
"indc,l" : "(c),l",
"indde,a" : "(de),a",
"indhl" : "(hl)",
"indhl,a" : "(hl),a",
"indhl,b" : "(hl),b",
"indhl,c" : "(hl),c",
"indhl,d" : "(hl),d",
"indhl,e" : "(hl),e",
"indhl,h" : "(hl),h",
"indhl,l" : "(hl),l",
"indhl,n" : "(hl),${0:02X}",
"indix+d" : "(ix+${0:02X})",
"indix+d,a" : "(ix+${0:02X}),a",
"indiy+d,a" : "(iy+${0:02X}),a",
"indix+d,b" : "(ix+${0:02X}),b",
"indix+d,c" : "(ix+${0:02X}),c",
"indix+d,d" : "(ix+${0:02X}),d",
"indix+d,e" : "(ix+${0:02X}),e",
"indix+d,h" : "(ix+${0:02X}),h",
"indix+d,l" : "(ix+${0:02X}),l",
"indix+d,n" : "(ix+${0:02X}),${1:02X}",
"indiy+d" : "(iy+${0:02X})",
"indiy+d,b" : "(iy+${0:02X}),b",
"indiy+d,c" : "(iy+${0:02X}),c",
"indiy+d,d" : "(iy+${0:02X}),d",
"indiy+d,e" : "(iy+${0:02X}),e",
"indiy+d,h" : "(iy+${0:02X}),h",
"indiy+d,l" : "(iy+${0:02X}),l",
"indiy+d,n" : "(iy+${0:02X}),${1:02X}",
"indn,a" : "(${0:02X}),a",
"indnn,a" : "(${1:02X}{0:02X}),a",
"indnn,hl" : "(${1:02X}{0:02X}),hl",
"indsp,hl" : "(sp),hl",
"ix" : "ix",
"ix,aa" : "ix,${1:02X}{0:02X}",
"ix,bc" : "ix,bc",
"ix,de" : "ix,de",
"ix,indaa" : "ix,(${1:02X}{0:02X})",
"ix,ix" : "ix,ix",
"ix,sp" : "ix,sp",
"iy" : "iy",
"iy,aa" : "iy,${1:02X}{0:02X}",
"iy,bc" : "iy,bc",
"iy,bc" : "iy,bc",
"iy,de" : "iy,de",
"iy,indaa" : "iy,(${1:02X}{0:02X})",
"iy,indaa" : "iy,(${1:02X}{0:02X})",
"iy,iy" : "iy,iy",
"iy,sp" : "iy,sp",
"l" : "l",
"l,a" : "l,a",
"l,b" : "l,b",
"l,c" : "l,c",
"l,d" : "l,d",
"l,e" : "l,e",
"l,h" : "l,h",
"l,indc" : "l,(c)",
"l,indhl" : "l,(hl)",
"l,indix+d" : "l,(ix+${0:02X})",
"l,indiy+d" : "l,(iy+${0:02X})",
"l,l" : "l,l",
"l,n" : "l,${0:02X}",
"m" : "m",
"m,nn" : "m,${1:02X}{0:02X}",
"n" : "${0:02X}",
"n,pcr" : "${0:04X}",
"n,indix+d" : "n,(ix+${0:02X})",
"n,indiy+d" : "n,(iy+${0:02X})",
"nc" : "nc",
"nc,pcr" : "nc,${0:04X}",
"nc,nn" : "nc,${1:02X}{0:02X}",
"nn" : "${1:02X}{0:02X}",
"nz" : "nz",
"nz,pcr" : "nz,${0:04X}",
"nz,nn" : "nz,${1:02X}{0:02X}",
"p" : "p",
"p,nn" : "p,${1:02X}{0:02X}",
"pcr" : "${0:04X}",
"pe" : "pe",
"pe,nn" : "pe,${1:02X}{0:02X}",
"po" : "po",
"po,nn" : "po,${1:02X}{0:02X}",
"r,a" : "r,a",
"sp" : "sp",
"sp,hl" : "sp,hl",
"sp,indaa" : "sp,(${1:02X}{0:02X})",
"sp,nn" : "sp,${1:02X}{0:02X}",
"z" : "z",
"z,pcr" : "z,${0:04X}",
"z,nn" : "z,${1:02X}{0:02X}",
}
# Op Code Table
# Key is numeric opcode (possibly multiple bytes)
# Value is a list:
# # bytes
# mnemonic
# addressing mode
# flags (e.g. pcr)
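# For example, 0x3e : [ 2, "ld", "a,n" ] is a two-byte instruction whose
# operands are rendered with the "a,n" template above, so the byte sequence
# 3E 42 disassembles as "ld a,$42". The optional fourth element is a flag
# constant (pcr for PC-relative operands, z80bit for the DD CB / FD CB prefixed
# bit instructions); those names are assumed to be supplied by the disassembler
# framework that loads this table, not defined in this file.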
opcodeTable = {
0x00 : [ 1, "nop", "implied" ],
0x01 : [ 3, "ld", "bc,nn" ],
0x02 : [ 1, "ld", "indbc,a" ],
0x03 : [ 1, "inc", "bc" ],
0x04 : [ 1, "inc", "b" ],
0x05 : [ 1, "dec", "b" ],
0x06 : [ 2, "ld", "b,n" ],
0x07 : [ 1, "rlca", "implied" ],
0x08 : [ 1, "ex", "af,af'" ],
0x09 : [ 1, "add", "hl,bc" ],
0x0a : [ 1, "ld", "a,indbc" ],
0x0b : [ 1, "dec", "bc" ],
0x0c : [ 1, "inc", "c" ],
0x0d : [ 1, "dec", "c" ],
0x0e : [ 2, "ld", "c,n" ],
0x0f : [ 1, "rrca", "implied" ],
0x10 : [ 2, "djnz", "pcr", pcr ],
0x11 : [ 3, "ld", "de,nn" ],
0x12 : [ 1, "ld", "indde,a" ],
0x13 : [ 1, "inc", "de" ],
0x14 : [ 1, "inc", "d" ],
0x15 : [ 1, "dec", "d" ],
0x16 : [ 2, "ld", "d,n" ],
0x17 : [ 1, "rla", "implied" ],
0x18 : [ 2, "jr", "pcr", pcr ],
0x19 : [ 1, "add", "hl,de" ],
0x1a : [ 1, "ld", "a,indde" ],
0x1b : [ 1, "dec", "de" ],
0x1c : [ 1, "inc", "e" ],
0x1d : [ 1, "dec", "e" ],
0x1e : [ 2, "ld", "e,n" ],
0x1f : [ 1, "rra", "implied" ],
0x20 : [ 2, "jr", "nz,pcr", pcr ],
0x21 : [ 3, "ld", "hl,nn" ],
0x22 : [ 3, "ld", "indnn,hl" ],
0x23 : [ 1, "inc", "hl" ],
0x24 : [ 1, "inc", "h" ],
0x25 : [ 1, "dec", "h" ],
0x26 : [ 2, "ld", "h,n" ],
0x27 : [ 1, "daa", "implied" ],
0x28 : [ 2, "jr", "z,pcr", pcr ],
0x29 : [ 1, "add", "hl,hl" ],
0x2a : [ 3, "ld", "hl,indnn" ],
0x2b : [ 1, "dec", "hl" ],
0x2c : [ 1, "inc", "l" ],
0x2d : [ 1, "dec", "l" ],
0x2e : [ 2, "ld", "l,n" ],
0x2f : [ 1, "cpl", "implied" ],
0x30 : [ 2, "jr", "nc,pcr", pcr ],
0x31 : [ 3, "ld", "sp,nn" ],
0x32 : [ 3, "ld", "indnn,a" ],
0x33 : [ 1, "inc", "sp" ],
0x34 : [ 1, "inc", "indhl" ],
0x35 : [ 1, "dec", "indhl" ],
0x36 : [ 2, "ld", "indhl,n" ],
0x37 : [ 1, "scf", "implied" ],
0x38 : [ 2, "jr", "c,pcr", pcr ],
0x39 : [ 1, "add", "hl,sp" ],
0x3a : [ 3, "ld", "a,indnn" ],
0x3b : [ 1, "dec", "sp" ],
0x3c : [ 1, "inc", "a" ],
0x3d : [ 1, "dec", "a" ],
0x3e : [ 2, "ld", "a,n" ],
0x3f : [ 1, "ccf", "implied" ],
0x40 : [ 1, "ld", "b,b" ],
0x41 : [ 1, "ld", "b,c" ],
0x42 : [ 1, "ld", "b,d" ],
0x43 : [ 1, "ld", "b,e" ],
0x44 : [ 1, "ld", "b,h" ],
0x45 : [ 1, "ld", "b,l" ],
0x46 : [ 1, "ld", "b,indhl" ],
0x47 : [ 1, "ld", "b,a" ],
0x48 : [ 1, "ld", "c,b" ],
0x49 : [ 1, "ld", "c,c" ],
0x4a : [ 1, "ld", "c,d" ],
0x4b : [ 1, "ld", "c,e" ],
0x4c : [ 1, "ld", "c,h" ],
0x4d : [ 1, "ld", "c,l" ],
0x4e : [ 1, "ld", "c,indhl" ],
0x4f : [ 1, "ld", "c,a" ],
0x50 : [ 1, "ld", "d,b" ],
0x51 : [ 1, "ld", "d,c" ],
0x52 : [ 1, "ld", "d,d" ],
0x53 : [ 1, "ld", "d,e" ],
0x54 : [ 1, "ld", "d,h" ],
0x55 : [ 1, "ld", "d,l" ],
0x56 : [ 1, "ld", "d,indhl" ],
0x57 : [ 1, "ld", "d,a" ],
0x58 : [ 1, "ld", "e,b" ],
0x59 : [ 1, "ld", "e,c" ],
0x5a : [ 1, "ld", "e,d" ],
0x5b : [ 1, "ld", "e,e" ],
0x5c : [ 1, "ld", "e,h" ],
0x5d : [ 1, "ld", "e,l" ],
0x5e : [ 1, "ld", "e,indhl" ],
0x5f : [ 1, "ld", "e,a" ],
0x60 : [ 1, "ld", "h,b" ],
0x61 : [ 1, "ld", "h,c" ],
0x62 : [ 1, "ld", "h,d" ],
0x63 : [ 1, "ld", "h,e" ],
0x64 : [ 1, "ld", "h,h" ],
0x65 : [ 1, "ld", "h,l" ],
0x66 : [ 1, "ld", "h,indhl" ],
0x67 : [ 1, "ld", "h,a" ],
0x68 : [ 1, "ld", "l,b" ],
0x69 : [ 1, "ld", "l,c" ],
0x6a : [ 1, "ld", "l,d" ],
0x6b : [ 1, "ld", "l,e" ],
0x6c : [ 1, "ld", "l,h" ],
0x6d : [ 1, "ld", "l,l" ],
0x6e : [ 1, "ld", "l,indhl" ],
0x6f : [ 1, "ld", "l,a" ],
0x70 : [ 1, "ld", "indhl,b" ],
0x71 : [ 1, "ld", "indhl,c" ],
0x72 : [ 1, "ld", "indhl,d" ],
0x73 : [ 1, "ld", "indhl,e" ],
0x74 : [ 1, "ld", "indhl,h" ],
0x75 : [ 1, "ld", "indhl,l" ],
0x76 : [ 1, "halt", "implied" ],
0x77 : [ 1, "ld", "indhl,a" ],
0x78 : [ 1, "ld", "a,b" ],
0x79 : [ 1, "ld", "a,c" ],
0x7a : [ 1, "ld", "a,d" ],
0x7b : [ 1, "ld", "a,e" ],
0x7c : [ 1, "ld", "a,h" ],
0x7d : [ 1, "ld", "a,l" ],
0x7e : [ 1, "ld", "a,indhl" ],
0x7f : [ 1, "ld", "a,a" ],
0x80 : [ 1, "add", "a,b" ],
0x81 : [ 1, "add", "a,c" ],
0x82 : [ 1, "add", "a,d" ],
0x83 : [ 1, "add", "a,e" ],
0x84 : [ 1, "add", "a,h" ],
0x85 : [ 1, "add", "a,l" ],
0x86 : [ 1, "add", "a,indhl" ],
0x87 : [ 1, "add", "a,a" ],
0x88 : [ 1, "adc", "a,b" ],
0x89 : [ 1, "adc", "a,c" ],
0x8a : [ 1, "adc", "a,d" ],
0x8b : [ 1, "adc", "a,e" ],
0x8c : [ 1, "adc", "a,h" ],
0x8d : [ 1, "adc", "a,l" ],
0x8e : [ 1, "adc", "a,indhl" ],
0x8f : [ 1, "adc", "a,a" ],
0x90 : [ 1, "sub", "b" ],
0x91 : [ 1, "sub", "c" ],
0x92 : [ 1, "sub", "d" ],
0x93 : [ 1, "sub", "e" ],
0x94 : [ 1, "sub", "h" ],
0x95 : [ 1, "sub", "l" ],
0x96 : [ 1, "sub", "indhl" ],
0x97 : [ 1, "sub", "a" ],
0x98 : [ 1, "sbc", "a,b" ],
0x99 : [ 1, "sbc", "a,c" ],
0x9a : [ 1, "sbc", "a,d" ],
0x9b : [ 1, "sbc", "a,e" ],
0x9c : [ 1, "sbc", "a,h" ],
0x9d : [ 1, "sbc", "a,l" ],
0x9e : [ 1, "sbc", "a,indhl" ],
0x9f : [ 1, "sbc", "a,a" ],
0xa0 : [ 1, "and", "b" ],
0xa1 : [ 1, "and", "c" ],
0xa2 : [ 1, "and", "d" ],
0xa3 : [ 1, "and", "e" ],
0xa4 : [ 1, "and", "h" ],
0xa5 : [ 1, "and", "l" ],
0xa6 : [ 1, "and", "indhl" ],
0xa7 : [ 1, "and", "a" ],
0xa8 : [ 1, "xor", "b" ],
0xa9 : [ 1, "xor", "c" ],
0xaa : [ 1, "xor", "d" ],
0xab : [ 1, "xor", "e" ],
0xac : [ 1, "xor", "h" ],
0xad : [ 1, "xor", "l" ],
0xae : [ 1, "xor", "indhl" ],
0xaf : [ 1, "xor", "a" ],
0xb0 : [ 1, "or", "b" ],
0xb1 : [ 1, "or", "c" ],
0xb2 : [ 1, "or", "d" ],
0xb3 : [ 1, "or", "e" ],
0xb4 : [ 1, "or", "h" ],
0xb5 : [ 1, "or", "l" ],
0xb6 : [ 1, "or", "indhl" ],
0xb7 : [ 1, "or", "a" ],
0xb8 : [ 1, "cp", "b" ],
0xb9 : [ 1, "cp", "c" ],
0xba : [ 1, "cp", "d" ],
0xbb : [ 1, "cp", "e" ],
0xbc : [ 1, "cp", "h" ],
0xbd : [ 1, "cp", "l" ],
0xbe : [ 1, "cp", "indhl" ],
0xbf : [ 1, "cp", "a" ],
0xc0 : [ 1, "ret", "nz" ],
0xc1 : [ 1, "pop", "bc" ],
0xc2 : [ 3, "jp", "nz,nn" ],
0xc3 : [ 3, "jp", "nn" ],
0xc4 : [ 3, "call","nz,nn" ],
0xc5 : [ 1, "push","bc" ],
0xc6 : [ 2, "add", "a,n" ],
0xc7 : [ 1, "rst", "00" ],
0xc8 : [ 1, "ret", "z" ],
0xc9 : [ 1, "ret", "implied" ],
0xca : [ 3, "jp", "z,nn" ],
0xcc : [ 3, "call","z,nn" ],
0xcd : [ 3, "call", "nn" ],
0xce : [ 2, "adc", "a,n" ],
0xcf : [ 1, "rst", "08" ],
0xd0 : [ 1, "ret", "nc" ],
0xd1 : [ 1, "pop", "de" ],
0xd2 : [ 3, "jp", "nc,nn" ],
0xd3 : [ 2, "out", "indn,a" ],
0xd4 : [ 3, "call", "nc,nn" ],
0xd5 : [ 1, "push", "de" ],
0xd6 : [ 2, "sub", "n" ],
0xd7 : [ 1, "rst", "10" ],
0xd8 : [ 1, "ret", "c" ],
0xd9 : [ 1, "exx", "implied" ],
0xda : [ 3, "jp", "c,nn" ],
0xdb : [ 2, "in", "a,indn" ],
0xdc : [ 3, "call", "c,nn" ],
0xde : [ 2, "sbc", "a,n" ],
0xdf : [ 1, "rst", "18" ],
0xe0 : [ 1, "ret", "po" ],
0xe1 : [ 1, "pop", "hl" ],
0xe2 : [ 3, "jp", "po,nn" ],
0xe3 : [ 1, "ex", "indsp,hl" ],
0xe4 : [ 3, "call", "po,nn" ],
0xe5 : [ 1, "push", "hl" ],
0xe6 : [ 2, "and", "n" ],
0xe7 : [ 1, "rst", "20" ],
0xe8 : [ 1, "ret", "pe" ],
0xe9 : [ 1, "jp", "indhl" ],
0xea : [ 3, "jp", "pe,nn" ],
0xeb : [ 1, "ex", "de,hl" ],
0xec : [ 3, "call", "pe,nn" ],
0xee : [ 2, "xor", "n" ],
0xef : [ 1, "rst", "28" ],
0xf0 : [ 1, "ret", "p" ],
0xf1 : [ 1, "pop", "af" ],
0xf2 : [ 3, "jp", "p,nn" ],
0xf3 : [ 1, "di", "implied" ],
0xf4 : [ 3, "call", "p,nn" ],
0xf5 : [ 1, "push", "af" ],
0xf6 : [ 2, "or", "n" ],
0xf7 : [ 1, "rst", "30" ],
0xf8 : [ 1, "ret", "m" ],
0xf9 : [ 1, "ld", "sp,hl" ],
0xfa : [ 3, "jp", "m,nn" ],
0xfb : [ 1, "ei", "implied" ],
0xfc : [ 3, "call", "m,nn" ],
0xfe : [ 2, "cp", "n" ],
0xff : [ 1, "rst", "38" ],
# Multibyte instructions
0xcb00 : [ 2, "rlc", "b" ],
0xcb01 : [ 2, "rlc", "c" ],
0xcb02 : [ 2, "rlc", "d" ],
0xcb03 : [ 2, "rlc", "e" ],
0xcb04 : [ 2, "rlc", "h" ],
0xcb05 : [ 2, "rlc", "l" ],
0xcb06 : [ 2, "rlc", "indhl" ],
0xcb07 : [ 2, "rlc", "a" ],
0xcb08 : [ 2, "rrc", "b" ],
0xcb09 : [ 2, "rrc", "c" ],
0xcb0a : [ 2, "rrc", "d" ],
0xcb0b : [ 2, "rrc", "e" ],
0xcb0c : [ 2, "rrc", "h" ],
0xcb0d : [ 2, "rrc", "l" ],
0xcb0e : [ 2, "rrc", "indhl" ],
0xcb0f : [ 2, "rrc", "a" ],
0xcb10 : [ 2, "rl", "b" ],
0xcb11 : [ 2, "rl", "c" ],
0xcb12 : [ 2, "rl", "d" ],
0xcb13 : [ 2, "rl", "e" ],
0xcb14 : [ 2, "rl", "h" ],
0xcb15 : [ 2, "rl", "l" ],
0xcb16 : [ 2, "rl", "indhl" ],
0xcb17 : [ 2, "rl", "a" ],
0xcb18 : [ 2, "rr", "b" ],
0xcb19 : [ 2, "rr", "c" ],
0xcb1a : [ 2, "rr", "d" ],
0xcb1b : [ 2, "rr", "e" ],
0xcb1c : [ 2, "rr", "h" ],
0xcb1d : [ 2, "rr", "l" ],
0xcb1e : [ 2, "rr", "indhl" ],
0xcb1f : [ 2, "rr", "a" ],
0xcb20 : [ 2, "sla", "b" ],
0xcb21 : [ 2, "sla", "c" ],
0xcb22 : [ 2, "sla", "d" ],
0xcb23 : [ 2, "sla", "e" ],
0xcb24 : [ 2, "sla", "h" ],
0xcb25 : [ 2, "sla", "l" ],
0xcb26 : [ 2, "sla", "indhl" ],
0xcb27 : [ 2, "sla", "a" ],
0xcb28 : [ 2, "sra", "b" ],
0xcb29 : [ 2, "sra", "c" ],
0xcb2a : [ 2, "sra", "d" ],
0xcb2b : [ 2, "sra", "e" ],
0xcb2c : [ 2, "sra", "h" ],
0xcb2d : [ 2, "sra", "l" ],
0xcb2e : [ 2, "sra", "indhl" ],
0xcb2f : [ 2, "sra", "a" ],
0xcb38 : [ 2, "srl", "b" ],
0xcb39 : [ 2, "srl", "c" ],
0xcb3a : [ 2, "srl", "d" ],
0xcb3b : [ 2, "srl", "e" ],
0xcb3c : [ 2, "srl", "h" ],
0xcb3d : [ 2, "srl", "l" ],
0xcb3e : [ 2, "srl", "indhl" ],
0xcb3f : [ 2, "srl", "a" ],
0xcb40 : [ 2, "bit", "0,b" ],
0xcb41 : [ 2, "bit", "0,c" ],
0xcb42 : [ 2, "bit", "0,d" ],
0xcb43 : [ 2, "bit", "0,e" ],
0xcb44 : [ 2, "bit", "0,h" ],
0xcb45 : [ 2, "bit", "0,l" ],
0xcb46 : [ 2, "bit", "0,indhl" ],
0xcb47 : [ 2, "bit", "0,a" ],
0xcb48 : [ 2, "bit", "1,b" ],
0xcb49 : [ 2, "bit", "1,c" ],
0xcb4a : [ 2, "bit", "1,d" ],
0xcb4b : [ 2, "bit", "1,e" ],
0xcb4c : [ 2, "bit", "1,h" ],
0xcb4d : [ 2, "bit", "1,l" ],
0xcb4e : [ 2, "bit", "1,indhl" ],
0xcb4f : [ 2, "bit", "1,a" ],
0xcb50 : [ 2, "bit", "2,b" ],
0xcb51 : [ 2, "bit", "2,c" ],
0xcb52 : [ 2, "bit", "2,d" ],
0xcb53 : [ 2, "bit", "2,e" ],
0xcb54 : [ 2, "bit", "2,h" ],
0xcb55 : [ 2, "bit", "2,l" ],
0xcb56 : [ 2, "bit", "2,indhl" ],
0xcb57 : [ 2, "bit", "2,a" ],
0xcb58 : [ 2, "bit", "3,b" ],
0xcb59 : [ 2, "bit", "3,c" ],
0xcb5a : [ 2, "bit", "3,d" ],
0xcb5b : [ 2, "bit", "3,e" ],
0xcb5c : [ 2, "bit", "3,h" ],
0xcb5d : [ 2, "bit", "3,l" ],
0xcb5e : [ 2, "bit", "3,indhl" ],
0xcb5f : [ 2, "bit", "3,a" ],
0xcb60 : [ 2, "bit", "4,b" ],
0xcb61 : [ 2, "bit", "4,c" ],
0xcb62 : [ 2, "bit", "4,d" ],
0xcb63 : [ 2, "bit", "4,e" ],
0xcb64 : [ 2, "bit", "4,h" ],
0xcb65 : [ 2, "bit", "4,l" ],
0xcb66 : [ 2, "bit", "4,indhl" ],
0xcb67 : [ 2, "bit", "4,a" ],
0xcb68 : [ 2, "bit", "5,b" ],
0xcb69 : [ 2, "bit", "5,c" ],
0xcb6a : [ 2, "bit", "5,d" ],
0xcb6b : [ 2, "bit", "5,e" ],
0xcb6c : [ 2, "bit", "5,h" ],
0xcb6d : [ 2, "bit", "5,l" ],
0xcb6e : [ 2, "bit", "5,indhl" ],
0xcb6f : [ 2, "bit", "5,a" ],
0xcb70 : [ 2, "bit", "6,b" ],
0xcb71 : [ 2, "bit", "6,c" ],
0xcb72 : [ 2, "bit", "6,d" ],
0xcb73 : [ 2, "bit", "6,e" ],
0xcb74 : [ 2, "bit", "6,h" ],
0xcb75 : [ 2, "bit", "6,l" ],
0xcb76 : [ 2, "bit", "6,indhl" ],
0xcb77 : [ 2, "bit", "6,a" ],
0xcb78 : [ 2, "bit", "7,b" ],
0xcb79 : [ 2, "bit", "7,c" ],
0xcb7a : [ 2, "bit", "7,d" ],
0xcb7b : [ 2, "bit", "7,e" ],
0xcb7c : [ 2, "bit", "7,h" ],
0xcb7d : [ 2, "bit", "7,l" ],
0xcb7e : [ 2, "bit", "7,indhl" ],
0xcb7f : [ 2, "bit", "7,a" ],
0xcb80 : [ 2, "res", "0,b" ],
0xcb81 : [ 2, "res", "0,c" ],
0xcb82 : [ 2, "res", "0,d" ],
0xcb83 : [ 2, "res", "0,e" ],
0xcb84 : [ 2, "res", "0,h" ],
0xcb85 : [ 2, "res", "0,l" ],
0xcb86 : [ 2, "res", "0,indhl" ],
0xcb87 : [ 2, "res", "0,a" ],
0xcb88 : [ 2, "res", "1,b" ],
0xcb89 : [ 2, "res", "1,c" ],
0xcb8a : [ 2, "res", "1,d" ],
0xcb8b : [ 2, "res", "1,e" ],
0xcb8c : [ 2, "res", "1,h" ],
0xcb8d : [ 2, "res", "1,l" ],
0xcb8e : [ 2, "res", "1,indhl" ],
0xcb8f : [ 2, "res", "1,a" ],
0xcb90 : [ 2, "res", "2,b" ],
0xcb91 : [ 2, "res", "2,c" ],
0xcb92 : [ 2, "res", "2,d" ],
0xcb93 : [ 2, "res", "2,e" ],
0xcb94 : [ 2, "res", "2,h" ],
0xcb95 : [ 2, "res", "2,l" ],
0xcb96 : [ 2, "res", "2,indhl" ],
0xcb97 : [ 2, "res", "2,a" ],
0xcb98 : [ 2, "res", "3,b" ],
0xcb99 : [ 2, "res", "3,c" ],
0xcb9a : [ 2, "res", "3,d" ],
0xcb9b : [ 2, "res", "3,e" ],
0xcb9c : [ 2, "res", "3,h" ],
0xcb9d : [ 2, "res", "3,l" ],
0xcb9e : [ 2, "res", "3,indhl" ],
0xcb9f : [ 2, "res", "3,a" ],
0xcba0 : [ 2, "res", "4,b" ],
0xcba1 : [ 2, "res", "4,c" ],
0xcba2 : [ 2, "res", "4,d" ],
0xcba3 : [ 2, "res", "4,e" ],
0xcba4 : [ 2, "res", "4,h" ],
0xcba5 : [ 2, "res", "4,l" ],
0xcba6 : [ 2, "res", "4,indhl" ],
0xcba7 : [ 2, "res", "4,a" ],
0xcba8 : [ 2, "res", "5,b" ],
0xcba9 : [ 2, "res", "5,c" ],
0xcbaa : [ 2, "res", "5,d" ],
0xcbab : [ 2, "res", "5,e" ],
0xcbac : [ 2, "res", "5,h" ],
0xcbad : [ 2, "res", "5,l" ],
0xcbae : [ 2, "res", "5,indhl" ],
0xcbaf : [ 2, "res", "5,a" ],
0xcbb0 : [ 2, "res", "6,b" ],
0xcbb1 : [ 2, "res", "6,c" ],
0xcbb2 : [ 2, "res", "6,d" ],
0xcbb3 : [ 2, "res", "6,e" ],
0xcbb4 : [ 2, "res", "6,h" ],
0xcbb5 : [ 2, "res", "6,l" ],
0xcbb6 : [ 2, "res", "6,indhl" ],
0xcbb7 : [ 2, "res", "6,a" ],
0xcbb8 : [ 2, "res", "7,b" ],
0xcbb9 : [ 2, "res", "7,c" ],
0xcbba : [ 2, "res", "7,d" ],
0xcbbb : [ 2, "res", "7,e" ],
0xcbbc : [ 2, "res", "7,h" ],
0xcbbd : [ 2, "res", "7,l" ],
0xcbbe : [ 2, "res", "7,indhl" ],
0xcbbf : [ 2, "res", "7,a" ],
0xcbc0 : [ 2, "set", "0,b" ],
0xcbc1 : [ 2, "set", "0,c" ],
0xcbc2 : [ 2, "set", "0,d" ],
0xcbc3 : [ 2, "set", "0,e" ],
0xcbc4 : [ 2, "set", "0,h" ],
0xcbc5 : [ 2, "set", "0,l" ],
0xcbc6 : [ 2, "set", "0,indhl" ],
0xcbc7 : [ 2, "set", "0,a" ],
0xcbc8 : [ 2, "set", "1,b" ],
0xcbc9 : [ 2, "set", "1,c" ],
0xcbca : [ 2, "set", "1,d" ],
0xcbcb : [ 2, "set", "1,e" ],
0xcbcc : [ 2, "set", "1,h" ],
0xcbcd : [ 2, "set", "1,l" ],
0xcbce : [ 2, "set", "1,indhl" ],
0xcbcf : [ 2, "set", "1,a" ],
0xcbd0 : [ 2, "set", "2,b" ],
0xcbd1 : [ 2, "set", "2,c" ],
0xcbd2 : [ 2, "set", "2,d" ],
0xcbd3 : [ 2, "set", "2,e" ],
0xcbd4 : [ 2, "set", "2,h" ],
0xcbd5 : [ 2, "set", "2,l" ],
0xcbd6 : [ 2, "set", "2,indhl" ],
0xcbd7 : [ 2, "set", "2,a" ],
0xcbd8 : [ 2, "set", "3,b" ],
0xcbd9 : [ 2, "set", "3,c" ],
0xcbda : [ 2, "set", "3,d" ],
0xcbdb : [ 2, "set", "3,e" ],
0xcbdc : [ 2, "set", "3,h" ],
0xcbdd : [ 2, "set", "3,l" ],
0xcbde : [ 2, "set", "3,indhl" ],
0xcbdf : [ 2, "set", "3,a" ],
0xcbe0 : [ 2, "set", "4,b" ],
0xcbe1 : [ 2, "set", "4,c" ],
0xcbe2 : [ 2, "set", "4,d" ],
0xcbe3 : [ 2, "set", "4,e" ],
0xcbe4 : [ 2, "set", "4,h" ],
0xcbe5 : [ 2, "set", "4,l" ],
0xcbe6 : [ 2, "set", "4,indhl" ],
0xcbe7 : [ 2, "set", "4,a" ],
0xcbe8 : [ 2, "set", "5,b" ],
0xcbe9 : [ 2, "set", "5,c" ],
0xcbea : [ 2, "set", "5,d" ],
0xcbeb : [ 2, "set", "5,e" ],
0xcbec : [ 2, "set", "5,h" ],
0xcbed : [ 2, "set", "5,l" ],
0xcbee : [ 2, "set", "5,indhl" ],
0xcbef : [ 2, "set", "5,a" ],
0xcbf0 : [ 2, "set", "6,b" ],
0xcbf1 : [ 2, "set", "6,c" ],
0xcbf2 : [ 2, "set", "6,d" ],
0xcbf3 : [ 2, "set", "6,e" ],
0xcbf4 : [ 2, "set", "6,h" ],
0xcbf5 : [ 2, "set", "6,l" ],
0xcbf6 : [ 2, "set", "6,indhl" ],
0xcbf7 : [ 2, "set", "6,a" ],
0xcbf8 : [ 2, "set", "7,b" ],
0xcbf9 : [ 2, "set", "7,c" ],
0xcbfa : [ 2, "set", "7,d" ],
0xcbfb : [ 2, "set", "7,e" ],
0xcbfc : [ 2, "set", "7,h" ],
0xcbfd : [ 2, "set", "7,l" ],
0xcbfe : [ 2, "set", "7,indhl" ],
0xcbff : [ 2, "set", "7,a" ],
0xdd09 : [ 2, "add", "ix,bc" ],
0xdd19 : [ 2, "add", "ix,de" ],
0xdd21 : [ 4, "ld", "ix,aa" ],
0xdd22 : [ 4, "ld", "indaa,ix" ],
0xdd23 : [ 2, "inc", "ix" ],
0xdd29 : [ 2, "add", "ix,ix" ],
0xdd2a : [ 4, "ld", "ix,indaa" ],
0xdd2b : [ 2, "dec", "ix" ],
0xdd34 : [ 3, "inc", "indix+d" ],
0xdd35 : [ 3, "dec", "indix+d" ],
0xdd36 : [ 4, "ld", "indix+d,n" ],
0xdd39 : [ 2, "add", "ix,sp" ],
0xdd46 : [ 3, "ld", "b,indix+d" ],
0xdd4e : [ 3, "ld", "c,indix+d" ],
0xdd56 : [ 3, "ld", "d,indix+d" ],
0xdd5e : [ 3, "ld", "e,indix+d" ],
0xdd66 : [ 3, "ld", "h,indix+d" ],
0xdd6e : [ 3, "ld", "l,indix+d" ],
0xdd70 : [ 3, "ld", "indix+d,b" ],
0xdd71 : [ 3, "ld", "indix+d,c" ],
0xdd72 : [ 3, "ld", "indix+d,d" ],
0xdd73 : [ 3, "ld", "indix+d,e" ],
0xdd74 : [ 3, "ld", "indix+d,h" ],
0xdd75 : [ 3, "ld", "indix+d,l" ],
0xdd77 : [ 3, "ld", "indix+d,a" ],
0xdd7e : [ 3, "ld", "a,indix+d" ],
0xdd86 : [ 3, "add", "a,indix+d" ],
0xdd8e : [ 3, "adc", "a,indix+d" ],
0xdd96 : [ 3, "sub", "indix+d" ],
0xdd9e : [ 3, "sbc", "a,indix+d" ],
0xdda6 : [ 3, "and", "indix+d" ],
0xddae : [ 3, "xor", "indix+d" ],
0xddb6 : [ 3, "or", "indix+d" ],
0xddbe : [ 3, "cp", "indix+d" ],
0xdd8e : [3, "adc", "indix+d" ],
0xed40 : [ 2, "in", "b,indc" ],
0xed41 : [ 2, "out", "indc,b" ],
0xed42 : [ 2, "sbc", "hl,bc" ],
0xed43 : [ 4, "ld", "indaa,bc" ],
0xed44 : [ 2, "neg", "implied" ],
0xed45 : [ 2, "retn", "implied" ],
0xed46 : [ 2, "im", "0" ],
0xed47 : [ 2, "ld", "i,a" ],
0xed48 : [ 2, "in", "c,indc" ],
0xed49 : [ 2, "out", "indc,c" ],
0xed4a : [ 2, "adc", "hl,bc" ],
0xed4b : [ 4, "ld", "bc,indaa" ],
0xed4d : [ 2, "reti", "implied" ],
0xed4f : [ 2, "ld", "r,a" ],
0xed50 : [ 2, "in", "d,indc" ],
0xed51 : [ 2, "out", "indc,d" ],
0xed52 : [ 2, "sbc", "hl,de" ],
0xed53 : [ 4, "ld", "indaa,de" ],
0xed56 : [ 2, "im", "1" ],
0xed57 : [ 2, "ld", "a,i" ],
0xed58 : [ 2, "in", "e,indc" ],
0xed59 : [ 2, "out", "indc,e" ],
0xed5a : [ 2, "adc", "hl,de" ],
0xed5b : [ 4, "ld", "de,indaa" ],
0xed5e : [ 2, "im", "2" ],
0xed5f : [ 2, "ld", "a,r" ],
0xed60 : [ 2, "in", "h,indc" ],
0xed61 : [ 2, "out", "indc,h" ],
0xed62 : [ 2, "sbc", "hl,hl" ],
0xed67 : [ 2, "rrd", "implied" ],
0xed68 : [ 2, "in", "l,indc" ],
0xed69 : [ 2, "out", "indc,l" ],
0xed6a : [ 2, "adc", "hl,hl" ],
0xed6f : [ 2, "rld", "implied" ],
0xed72 : [ 2, "sbc", "hl,sp" ],
0xed73 : [ 4, "ld", "indaa,sp" ],
0xed76 : [ 2, "in", "a,indc" ],
0xed79 : [ 2, "out", "indc,a" ],
0xed7a : [ 2, "adc", "hl,sp" ],
0xed7b : [ 4, "ld", "sp,indaa" ],
0xeda0 : [ 2, "ldi", "implied" ],
0xeda1 : [ 2, "cpi", "implied" ],
0xeda2 : [ 2, "ini", "implied" ],
0xeda3 : [ 2, "outi", "implied" ],
0xeda8 : [ 2, "ldd", "implied" ],
0xeda9 : [ 2, "cpd", "implied" ],
0xedaa : [ 2, "ind", "implied" ],
0xedab : [ 2, "outd", "implied" ],
0xedb0 : [ 2, "ldir", "implied" ],
0xedb1 : [ 2, "cpir", "implied" ],
0xedb2 : [ 2, "inir", "implied" ],
0xedb3 : [ 2, "otir", "implied" ],
0xedb8 : [ 2, "lddr", "implied" ],
0xedb9 : [ 2, "cpdr", "implied" ],
0xedba : [ 2, "indr", "implied" ],
0xedbb : [ 2, "otdr", "implied" ],
0xfd09 : [ 2, "add", "iy,bc" ],
0xfd19 : [ 2, "add", "iy,de" ],
0xfd21 : [ 4, "ld", "iy,aa" ],
0xfd22 : [ 4, "ld", "indaa,iy" ],
0xfd23 : [ 2, "inc", "iy" ],
0xfd29 : [ 2, "add", "iy,iy" ],
0xfd2a : [ 4, "ld", "iy,indaa" ],
0xfd2b : [ 2, "dec", "iy" ],
0xfd34 : [ 3, "inc", "indiy+d" ],
0xfd35 : [ 3, "dec", "indiy+d" ],
0xfd36 : [ 4, "ld", "indiy+d,n" ],
0xfd39 : [ 2, "add", "iy,sp" ],
0xfd46 : [ 3, "ld", "b,indiy+d" ],
0xfd4e : [ 3, "ld", "c,indiy+d" ],
0xfd56 : [ 3, "ld", "d,indiy+d" ],
0xfd5e : [ 3, "ld", "e,indiy+d" ],
0xfd66 : [ 3, "ld", "h,indiy+d" ],
0xfd6e : [ 3, "ld", "l,indiy+d" ],
0xfd70 : [ 3, "ld", "indiy+d,b" ],
0xfd71 : [ 3, "ld", "indiy+d,c" ],
0xfd72 : [ 3, "ld", "indiy+d,d" ],
0xfd73 : [ 3, "ld", "indiy+d,e" ],
0xfd74 : [ 3, "ld", "indiy+d,h" ],
0xfd75 : [ 3, "ld", "indiy+d,l" ],
0xfd77 : [ 3, "ld", "indiy+d,a" ],
0xfd7e : [ 3, "ld", "a,indiy+d" ],
0xfd86 : [ 3, "add", "a,indiy+d" ],
0xfd8e : [ 3, "adc", "a,indiy+d" ],
0xfd96 : [ 3, "sub", "indiy+d" ],
0xfd9e : [ 3, "sbc", "a,indiy+d" ],
0xfda6 : [ 3, "and", "indiy+d" ],
0xfdae : [ 3, "xor", "indiy+d" ],
0xfdb6 : [ 3, "or", "indiy+d" ],
0xfdbe : [ 3, "cp", "indiy+d" ],
# Placeholder 2-byte leadins for the 4-byte ix/iy bit instructions fully
# defined below. The z80bit flag triggers a special case in the disassembler
# to look up the 4 byte instruction.
0xddcb : [ 4, "ixbit", "implied", z80bit ],
0xfdcb : [ 4, "iybit", "implied", z80bit ],
}
def extra_opcodes(addr_table, op_table):
# Create all the 0xddcb and 0xfdcb addressing modes. The modes look like [0-7],(i[xy]+*)[,[abcdehl]]?
for index in ['x', 'y']:
for bit in range(8):
k = "%d,indi%s+d" % (bit, index)
v = "%d,(i%s+${0:02X})" % (bit, index)
addr_table[k] = v
for reg in ['a', 'b', 'c', 'd', 'e', 'h', 'l']:
k = "%d,indi%s+d,%s" % (bit, index, reg)
v = "%d,(i%s+${0:02X}),%s" % (bit, index, reg)
addr_table[k] = v
# Create all the 0xddcb and 0xfdcb opcodes. These are all 4 byte opcodes
# where the 3rd byte is a -128 - +127 offset. For the purposes of using
# this table, the 3rd byte will be marked as zero and the disassembler will
# have to insert the real 3rd byte during the z80bit special-case handling.
for first_byte, x_or_y in [(0xdd, 'x'), (0xfd, 'y')]:
# groups of 8, expand to full 256
mnemonics_8 = ['rlc', 'rrc', 'rl', 'rr', 'sla', 'sra', 'sll', 'srl'] + ['bit'] * 8 + ['res'] * 8 + ['set'] * 8
mnemonics = [m for mnemonic in mnemonics_8 for m in [mnemonic]*8]
# create all 256 addressing modes, in groups of 64
addrmodes = ['indi%s+d' + a for a in [',b', ',c', ',d', ',e', ',h', ',l', '', ',a']] * 8 + [f % d for d in range(8) for f in ['%d,indi%%s+d'] * 8] + [f % d for d in range(8) for f in ['%d,indi%%s+d' + a for a in [',b', ',c', ',d', ',e', ',h', ',l', '', ',a']]] * 2
for fourth_byte, (instruction, addrmode) in enumerate(zip(mnemonics, addrmodes)):
opcode = (first_byte << 24) + (0xcb << 16) + fourth_byte
op_table[opcode] = [ 4, instruction, addrmode % x_or_y, z80bit ]
extra_opcodes(addressModeTable, opcodeTable)
del extra_opcodes
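# Sketch of what extra_opcodes generated (comments only). Keys are 32-bit
# values of the form 0xDDCB00nn / 0xFDCB00nn, where the zeroed third byte is
# the displacement slot filled in by the disassembler's z80bit special case.
# For instance, with the construction above, 0xddcb0006 maps to
# [4, "rlc", "indix+d", z80bit], which formats as "rlc (ix+$NN)" once the real
# displacement byte is substituted into the ${0:02X} placeholder.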
# End of processor specific code
##########################################################################
|
jefftranter/udis
|
z80.py
|
Python
|
apache-2.0
| 32,759 |
from rasa.engine.caching import TrainingCache
from rasa.engine.graph import ExecutionContext, GraphNode, GraphSchema, SchemaNode
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
from rasa.engine.training.components import PrecomputedValueProvider
from rasa.engine.training.hooks import TrainingHook
from tests.engine.graph_components_test_classes import (
CacheableComponent,
CacheableText,
)
def test_training_hook_saves_to_cache(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=CacheableComponent,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=CacheableComponent,
constructor_name="create",
component_config={},
fn_name="run",
inputs={"suffix": "input_node"},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=CacheableComponent,
config={"prefix": "Hello "},
inputs={"suffix": "Joe"},
)
output_fingerprint_key = temp_cache.get_cached_output_fingerprint(fingerprint_key)
assert output_fingerprint_key
cached_result = temp_cache.get_cached_result(
output_fingerprint_key=output_fingerprint_key,
model_storage=default_model_storage,
node_name="hello",
)
assert isinstance(cached_result, CacheableText)
assert cached_result.text == "Hello Joe"
def test_training_hook_does_not_cache_cached_component(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=PrecomputedValueProvider,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=PrecomputedValueProvider,
constructor_name="create",
component_config={"output": CacheableText("hi")},
fn_name="get_value",
inputs={},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=PrecomputedValueProvider,
config={"output": CacheableText("hi")},
inputs={},
)
# The hook should not cache the output of a PrecomputedValueProvider
assert not temp_cache.get_cached_output_fingerprint(fingerprint_key)
|
RasaHQ/rasa_nlu
|
tests/engine/training/test_hooks.py
|
Python
|
apache-2.0
| 3,903 |
#!/usr/bin/env python3
# In this example we will see how data in reference tables can be manipulated
# using the REST API.
# Our company has a multilevel security authorization architecture. Users
# are assigned an authorization server that they must use to log in to the
# network. Once inside the general network, some users are authorized to access
# the secure network. They use a different authorization server and must
# connect through an assigned port.
# We have set up a reference table that stores the ip addresses of the server
# that each user must use to log in to the network. It also stores the ip
# address and port of the secure server that authorized users must use to
# connect to the secure network. We also store the time the user last logged in
# to the secure server. Rules are in place to generate offenses if users
# attempt to access the network in an unauthorized manner.
# We would like to impose a business rule to revoke a user's secure access if
# they do not log in for a period of time. This time period is determined by
# an external system.
# We would also like to generate a report showing the users that have secure
# access, those that used to have it, but let it expire, and those that don't
# have secure access, in order to track who is using our networks.
# For a list of the endpoints that you can use along with the parameters that
# they accept you can view the REST API interactive help page on your
# deployment at https://<hostname>/api_doc
# You can also retrieve a list of available endpoints through the API itself
# at the /api/help/endpoints endpoint.
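# A quick sketch of that endpoint-discovery note, using the same call pattern
# this sample uses below (hedged: the exact path accepted by RestApiClient is
# assumed to mirror the other calls in this file):
#
#   client = client_module.RestApiClient(version='6.0')
#   response = client.call_api('help/endpoints', 'GET')
#   SampleUtilities.pretty_print_response(response)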
import json
import os
import sys
import time
import Cleanup
import importlib
sys.path.append(os.path.realpath('../modules'))
client_module = importlib.import_module('RestApiClient')
SampleUtilities = importlib.import_module('SampleUtilities')
def main():
# Create our client and set up some sample data.
client = client_module.RestApiClient(version='6.0')
setup_data(client)
# First lets have a look at the data in the system.
response = client.call_api(
'reference_data/tables/rest_api_samples_server_access', 'GET')
SampleUtilities.pretty_print_response(response)
response = client.call_api(
'reference_data/tables/rest_api_samples_server_access', 'GET')
response_body = json.loads(response.read().decode('utf-8'))
data = response_body['data']
# Note that tables are stored sparsely, that is to say if a particular
# cell is empty it does not exist in the table. However, our external
# reporting tool can put appropriate defaults for these cells into the
# report.
print_custom_report(data)
# Now our external system calculates which users should have their secure
# access revoked.
threshold = get_old_data_threshold(data)
# check to see which users should have their secure access expired.
for user in data:
if ('Last_Secure_Login' in data[user]):
if (data[user]['Last_Secure_Login']['last_seen'] < threshold):
print("User '" + user +
"' has not logged in to the secure server recently. " +
"Revoking their access.")
outer_key = user
if ('Authorization_Server_IP_Secure' in data[user]):
inner_key = 'Authorization_Server_IP_Secure'
value = (data[user]['Authorization_Server_IP_Secure']
['value'])
response = client.call_api(
'reference_data/tables/rest_api_samples_server_' +
'access/' + outer_key + '/' + inner_key + '?value=' +
value, 'DELETE')
if ('Authorization_Server_PORT_Secure' in data[user]):
inner_key = 'Authorization_Server_PORT_Secure'
value = (data[user]['Authorization_Server_PORT_Secure']
['value'])
response = client.call_api(
'reference_data/tables/rest_api_samples_server_' +
'access/' + outer_key + '/' + inner_key + '?value=' +
value, 'DELETE')
# now lets have a look at the data after we updated the table.
response = client.call_api(
'reference_data/tables/rest_api_samples_server_access', 'GET')
SampleUtilities.pretty_print_response(response)
response = client.call_api(
'reference_data/tables/rest_api_samples_server_access', 'GET')
response_body = json.loads(response.read().decode('utf-8'))
data = response_body['data']
print_custom_report(data)
# You can uncomment this line to have this script remove the data it
# creates after it is done, or you can invoke the Cleanup script directly.
# Cleanup.cleanup_04_tables(client)
# This helper function sets up data used in this sample.
def setup_data(client):
current_time = int(time.time() * 1000)
key_name_types = (
"[{\"element_type\": \"IP\", " +
"\"key_name\": \"Authorization_Server_IP_Secure\"}, " +
"{\"element_type\": \"PORT\", " +
"\"key_name\": \"Authorization_Server_PORT_Secure\"}, " +
"{\"element_type\": \"DATE\", \"key_name\": \"Last_Secure_Login\"}, " +
"{\"element_type\": \"IP\", " +
"\"key_name\": \"Authorization_Server_IP_General\"}]")
# Create the reference set.
params = {'name': 'rest_api_samples_server_access',
'key_name_types': key_name_types,
'element_type': 'ALN'}
SampleUtilities.data_setup(client, 'reference_data/tables', 'POST',
params=params)
# For each parameter set defined in the for loop, add the data to the
# rest_api_samples_server_access reference set using the POST
# reference_data/tables/{name} endpoint.
for params in [
{
'outer_key': 'calvin',
'inner_key': 'Authorization_Server_IP_Secure',
'value': '6.3.9.12'
},
{
'outer_key': 'calvin',
'inner_key': 'Authorization_Server_PORT_Secure',
'value': '443'
},
{
'outer_key': 'calvin',
'inner_key': 'Authorization_Server_IP_General',
'value': '7.12.15.12'
},
{
'outer_key': 'calvin',
'inner_key': 'Last_Secure_Login',
'value': str(current_time)
},
{
'outer_key': 'socrates',
'inner_key': 'Authorization_Server_IP_General',
'value': '7.12.14.85'
},
{
'outer_key': 'mill',
'inner_key': 'Authorization_Server_IP_Secure',
'value': '6.3.9.12'
},
{
'outer_key': 'mill',
'inner_key': 'Authorization_Server_PORT_Secure',
'value': '443'
},
{
'outer_key': 'mill',
'inner_key': 'Last_Secure_Login',
'value': str(current_time)
},
{
'outer_key': 'mill',
'inner_key': 'Authorization_Server_IP_General',
'value': '7.13.22.85'
},
{
'outer_key': 'hobbes',
'inner_key': 'Authorization_Server_IP_Secure',
'value': '6.3.9.12'
},
{
'outer_key': 'hobbes',
'inner_key': 'Authorization_Server_PORT_Secure',
'value': '22'
},
{
'outer_key': 'hobbes',
'inner_key': 'Last_Secure_Login',
'value': str(current_time)
},
{
'outer_key': 'hobbes',
'inner_key': 'Authorization_Server_IP_General',
'value': '7.12.19.125'
},
{
'outer_key': 'aquinas',
'inner_key': 'Last_Secure_Login',
'value': str(current_time - 1000000)
},
{
'outer_key': 'aquinas',
'inner_key': 'Authorization_Server_IP_General',
'value': '7.12.15.12'
}]:
SampleUtilities.data_setup(
client, 'reference_data/tables/rest_api_samples_server_access',
'POST', params=params)
# This function represents work done by an external system to determine which
# users should have their secure access revoked.
def get_old_data_threshold(data):
total_time = 0
counter = 0
for outer_key in data:
for inner_key in data[outer_key]:
if (('Authorization_Server_IP_Secure' in data[outer_key]) and
('Last_Secure_Login' in data[outer_key])):
total_time += data[outer_key]['Last_Secure_Login']['last_seen']
counter += 1
return total_time / counter
# This function represents work done by an external reporting tool.
def print_custom_report(data):
# print the table headers.
usernames = data.keys()
table_headers_dict = {}
table_headers = []
known_headers = ['Authorization_Server_IP_Secure',
'Authorization_Server_PORT_Secure',
'Authorization_Server_IP_General', 'Last_Secure_Login']
# calculate the full list table headers, since not all columns will exist
# in each row.
for user in usernames:
for header in data[user]:
table_headers_dict[header] = ""
# Get the table headers into a list and sort them.
for header in table_headers_dict:
table_headers.append(header)
table_headers.sort()
# pretty print the table headers.
print('----------------------------------------'.ljust(40), end="")
for header in table_headers:
print('----------------------------------------'.ljust(40), end="")
print()
print(" usernames".ljust(40), end="")
for header in table_headers:
print(header.ljust(40), end="")
print()
print('----------------------------------------'.ljust(40), end="")
for header in table_headers:
print('----------------------------------------'.ljust(40), end="")
unsecure_users = {}
secure_user = {}
expired_users = {}
# sort the users into categories.
for user in usernames:
# if a user has a secure IP assigned, they are a secure user.
if('Authorization_Server_IP_Secure' in data[user]):
secure_user[user] = data[user]
# if a user does not have a secure IP assigned but has logged in
# securely in the past then they are an expired secure user.
elif('Last_Secure_Login' in data[user]):
expired_users[user] = data[user]
# otherwise they are a general user.
else:
unsecure_users[user] = data[user]
# pretty print the table rows.
print("\nUnsecure Users")
for username in unsecure_users:
print_row(username, unsecure_users[username], table_headers,
known_headers, "N/A")
print("\nExpired Secure Users")
for username in expired_users:
print_row(username, expired_users[username], table_headers,
known_headers, "Expired")
print("\nSecure Users")
for username in secure_user:
print_row(username, secure_user[username], table_headers,
known_headers, "Not Set")
# This function prints a row of the custom report based on the information
# extracted by the reporting system.
def print_row(username, user, table_headers, known_headers, not_set_message):
print((" " + username).ljust(40), end="")
for column in table_headers:
if (column in user):
# Format the login information as a date.
if (column == 'Last_Secure_Login'):
login_time = time.localtime(int(user[column]['value']) / 1000)
print(time.strftime('%Y-%m-%d %H:%M:%S', login_time).ljust(40),
end="")
else:
print(user[column]['value'].ljust(40), end="")
# If this known column does not exist for this user, print the
# 'not set' message.
elif (column in known_headers):
print(not_set_message.ljust(40), end="")
# Leave unassigned custom columns (if any exist) blank.
else:
print("".ljust(40), end="")
print()
if __name__ == "__main__":
main()
|
ibm-security-intelligence/api-samples
|
reference_data/04_Tables.py
|
Python
|
apache-2.0
| 12,660 |
from django.contrib import admin
'''from tester.models import Club,Member,Signup,Event
class admin_club(admin.ModelAdmin):
list_display=["club_name"]
class admin_event(admin.ModelAdmin):
list_display=["event_name"]
class admin_student(admin.ModelAdmin):
list_display=["usn","name"]
class admin_member(admin.ModelAdmin):
list_display=["club_id","usn"]
admin.site.register(Club,admin_club)
admin.site.register(Member,admin_member)
admin.site.register(Signup,admin_student)
admin.site.register(Event,admin_event)
'''
|
anirudhagar13/PES-Portal
|
pes_portal/club/admin.py
|
Python
|
apache-2.0
| 548 |
from datetime import datetime, timedelta
from manager_rest.test.base_test import BaseServerTestCase
from cloudify_rest_client.exceptions import CloudifyClientError
class ExecutionSchedulesTestCase(BaseServerTestCase):
DEPLOYMENT_ID = 'deployment'
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
an_hour_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=1)
two_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=2)
three_hours_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(hours=3)
three_weeks_from_now = \
datetime.utcnow().replace(microsecond=0) + timedelta(weeks=3)
deployment_id = None
def setUp(self):
super(ExecutionSchedulesTestCase, self).setUp()
_, self.deployment_id, _, _ = self.put_deployment(self.DEPLOYMENT_ID)
def test_schedule_create(self):
schedule_id = 'sched-1'
workflow_id = 'install'
schedule = self.client.execution_schedules.create(
schedule_id, self.deployment_id, workflow_id,
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(schedule.id, schedule_id)
self.assertEqual(schedule.deployment_id, self.deployment_id)
self.assertEqual(schedule.workflow_id, workflow_id)
self.assertEqual(datetime.strptime(schedule.since, self.fmt),
self.an_hour_from_now)
self.assertEqual(len(schedule['all_next_occurrences']), 5)
self.assertEqual(
datetime.strptime(schedule['next_occurrence'], self.fmt),
self.an_hour_from_now)
self.assertEqual(schedule['slip'], 0)
self.assertEqual(schedule['stop_on_fail'], False)
def test_schedule_create_weekdays(self):
schedule = self.client.execution_schedules.create(
'sched-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.three_weeks_from_now,
recurrence='1 days', weekdays=['mo', 'tu', 'we', 'th'])
self.assertEqual(len(schedule['all_next_occurrences']), 12) # 3w * 4d
def test_schedules_list(self):
schedule_ids = ['sched-1', 'sched-2']
for schedule_id in schedule_ids:
self.client.execution_schedules.create(
schedule_id, self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
schedules = self.client.execution_schedules.list()
self.assertEqual(len(schedules), 2)
self.assertSetEqual({s.id for s in schedules}, set(schedule_ids))
def test_schedule_delete(self):
self.client.execution_schedules.create(
'delete-me', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='1 minutes', count=5)
self.assertEqual(len(self.client.execution_schedules.list()), 1)
self.client.execution_schedules.delete('delete-me', self.deployment_id)
self.assertEqual(len(self.client.execution_schedules.list()), 0)
def test_schedule_update(self):
schedule = self.client.execution_schedules.create(
'update-me', self.deployment_id, 'install',
since=self.an_hour_from_now, until=self.two_hours_from_now,
recurrence='1 minutes')
# `until` is inclusive
self.assertEqual(len(schedule['all_next_occurrences']), 61)
self.assertEqual(schedule['rule']['recurrence'], '1 minutes')
self.assertEqual(schedule['slip'], 0)
self.client.execution_schedules.update(
'update-me', self.deployment_id, recurrence='5 minutes', slip=30)
# get the schedule from the DB and not directly from .update endpoint
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 13) # 60/5+1
self.assertEqual(schedule['rule']['recurrence'], '5 minutes')
self.assertEqual(schedule['slip'], 30)
self.client.execution_schedules.update(
'update-me', self.deployment_id, until=self.three_hours_from_now)
schedule = self.client.execution_schedules.get('update-me',
self.deployment_id)
self.assertEqual(len(schedule['all_next_occurrences']), 25) # 2*60/5+1
def test_schedule_get_invalid_id(self):
self.assertRaisesRegex(
CloudifyClientError,
'404: Requested `ExecutionSchedule` .* was not found',
self.client.execution_schedules.get,
'nonsuch',
self.deployment_id
)
def test_schedule_create_no_since(self):
self.assertRaises(
AssertionError,
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_time_format(self):
self.assertRaisesRegex(
AttributeError,
"'str' object has no attribute 'isoformat'",
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'install',
since='long ago', recurrence='1 minutes', count=5
)
def test_schedule_create_invalid_workflow(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: Workflow some_workflow does not exist',
self.client.execution_schedules.create,
'some_id', self.deployment_id, 'some_workflow',
since=self.an_hour_from_now, recurrence='1 minutes', count=5,
)
def test_schedule_invalid_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['oneday', 'someday']
)
self.client.execution_schedules.create(
'good-weekdays', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours', count=6,
weekdays=['mo', 'tu']
)
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.update,
'good-weekdays', self.deployment_id, weekdays=['oneday', 'someday']
)
def test_schedule_create_invalid_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* invalid weekday',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['5tu']
)
def test_schedule_create_invalid_recurrence_with_complex_weekdays(self):
self.assertRaisesRegex(
CloudifyClientError,
'400:.* complex weekday expression',
self.client.execution_schedules.create,
'bad-complex-wd', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='4 hours',
weekdays=['2mo', 'l-tu']
)
def test_schedule_invalid_repetition_without_recurrence(self):
recurrence_error = \
'400: recurrence must be specified for execution count ' \
'larger than 1'
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.create,
'no-recurrence-no-count', self.deployment_id, 'uninstall',
since=self.an_hour_from_now, weekdays=['su', 'mo', 'tu'],
)
self.client.execution_schedules.create(
'no-recurrence-count-1', self.deployment_id, 'install',
since=self.an_hour_from_now, count=1,
)
self.assertRaisesRegex(
CloudifyClientError,
recurrence_error,
self.client.execution_schedules.update,
'no-recurrence-count-1', self.deployment_id, count=2
)
def test_schedule_create_invalid_recurrence(self):
self.assertRaisesRegex(
CloudifyClientError,
'400: `10 doboshes` is not a legal recurrence expression.',
self.client.execution_schedules.create,
'bad-freq', self.deployment_id, 'install',
since=self.an_hour_from_now, recurrence='10 doboshes'
)
|
cloudify-cosmo/cloudify-manager
|
rest-service/manager_rest/test/endpoints/test_execution_schedules.py
|
Python
|
apache-2.0
| 8,636 |
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for L{txdav.carddav.datastore.sql}, mostly based on
L{txdav.carddav.datastore.test.common}.
"""
from twext.enterprise.dal.syntax import Select, Parameter
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import deferLater
from twisted.trial import unittest
from twistedcaldav import carddavxml
from twistedcaldav.vcard import Component as VCard
from twistedcaldav.vcard import Component as VComponent
from txdav.base.propertystore.base import PropertyName
from txdav.carddav.datastore.test.common import CommonTests as AddressBookCommonTests, \
vcard4_text
from txdav.carddav.datastore.test.test_file import setUpAddressBookStore
from txdav.carddav.datastore.util import _migrateAddressbook, migrateHome
from txdav.common.icommondatastore import NoSuchObjectResourceError
from txdav.common.datastore.sql import EADDRESSBOOKTYPE, CommonObjectResource
from txdav.common.datastore.sql_tables import _ABO_KIND_PERSON, _ABO_KIND_GROUP, schema
from txdav.common.datastore.test.util import cleanStore
from txdav.carddav.datastore.sql import AddressBook
from txdav.xml.rfc2518 import GETContentLanguage, ResourceType
class AddressBookSQLStorageTests(AddressBookCommonTests, unittest.TestCase):
"""
AddressBook SQL storage tests.
"""
@inlineCallbacks
def setUp(self):
yield super(AddressBookSQLStorageTests, self).setUp()
yield self.buildStoreAndDirectory(
extraUids=(
u"home_empty",
u"homeNew",
)
)
yield self.populate()
@inlineCallbacks
def populate(self):
populateTxn = self.storeUnderTest().newTransaction()
for homeUID in self.requirements:
addressbooks = self.requirements[homeUID]
home = yield populateTxn.addressbookHomeWithUID(homeUID, True)
if addressbooks is not None:
addressbook = home.addressbook()
addressbookObjNames = addressbooks[addressbook.name()]
if addressbookObjNames is not None:
for objectName in addressbookObjNames:
objData = addressbookObjNames[objectName]
yield addressbook.createAddressBookObjectWithName(
objectName, VCard.fromString(objData)
)
yield populateTxn.commit()
self.notifierFactory.reset()
@inlineCallbacks
def assertAddressbooksSimilar(self, a, b, bAddressbookFilter=None):
"""
Assert that two addressbooks have a similar structure (contain the same
        vcards).
"""
@inlineCallbacks
def namesAndComponents(x, filter=lambda x: x.component()):
fromObjs = yield x.addressbookObjects()
returnValue(dict([(fromObj.name(), (yield filter(fromObj)))
for fromObj in fromObjs]))
if bAddressbookFilter is not None:
extra = [bAddressbookFilter]
else:
extra = []
self.assertEquals((yield namesAndComponents(a)),
(yield namesAndComponents(b, *extra)))
def assertPropertiesSimilar(self, a, b, disregard=[]):
"""
Assert that two objects with C{properties} methods have similar
properties.
@param disregard: a list of L{PropertyName} keys to discard from both
input and output.
"""
def sanitize(x):
result = dict(x.properties().items())
for key in disregard:
result.pop(key, None)
return result
self.assertEquals(sanitize(a), sanitize(b))
def fileTransaction(self):
"""
Create a file-backed addressbook transaction, for migration testing.
"""
setUpAddressBookStore(self)
fileStore = self.addressbookStore
txn = fileStore.newTransaction()
self.addCleanup(txn.commit)
return txn
@inlineCallbacks
def test_migrateAddressbookFromFile(self):
"""
C{_migrateAddressbook()} can migrate a file-backed addressbook to a
        database-backed addressbook.
"""
fromAddressbook = yield self.fileTransaction().addressbookHomeWithUID(
"home1").addressbookWithName("addressbook")
toHome = yield self.transactionUnderTest().addressbookHomeWithUID(
"new-home", create=True)
toAddressbook = yield toHome.addressbookWithName("addressbook")
yield _migrateAddressbook(fromAddressbook, toAddressbook,
lambda x: x.component())
yield self.assertAddressbooksSimilar(fromAddressbook, toAddressbook)
@inlineCallbacks
def test_migrateBadAddressbookFromFile(self):
"""
C{_migrateAddressbook()} can migrate a file-backed addressbook to a
database-backed addressbook. We need to test what happens when there
is "bad" address data present in the file-backed addressbook.
"""
fromAddressbook = yield self.fileTransaction().addressbookHomeWithUID(
"home_bad").addressbookWithName("addressbook")
toHome = yield self.transactionUnderTest().addressbookHomeWithUID(
"new-home", create=True)
toAddressbook = yield toHome.addressbookWithName("addressbook")
ok, bad = (yield _migrateAddressbook(fromAddressbook, toAddressbook,
lambda x: x.component()))
self.assertEqual(ok, 1)
self.assertEqual(bad, 1)
@inlineCallbacks
def test_migrateHomeFromFile(self):
"""
L{migrateHome} will migrate an L{IAddressbookHome} provider from one
backend to another; in this specific case, from the file-based backend
to the SQL-based backend.
"""
fromHome = yield self.fileTransaction().addressbookHomeWithUID("home1")
        # Populate arbitrary / unused dead properties so there's something
# to verify against.
key = PropertyName.fromElement(GETContentLanguage)
fromHome.properties()[key] = GETContentLanguage("C")
(yield fromHome.addressbookWithName("addressbook")).properties()[
key] = (
GETContentLanguage("pig-latin")
)
(yield fromHome.addressbookWithName("addressbook")).properties()[
PropertyName.fromElement(ResourceType)] = (
carddavxml.ResourceType.addressbook
)
toHome = yield self.transactionUnderTest().addressbookHomeWithUID(
"new-home", create=True
)
yield migrateHome(fromHome, toHome, lambda x: x.component())
toAddressbooks = yield toHome.addressbooks()
self.assertEquals(set([c.name() for c in toAddressbooks]),
set([k for k in self.requirements['home1'].keys()
if self.requirements['home1'][k] is not None]))
fromAddressbooks = yield fromHome.addressbooks()
for c in fromAddressbooks:
self.assertPropertiesSimilar(
c, (yield toHome.addressbookWithName(c.name())),
)
self.assertPropertiesSimilar(fromHome, toHome,)
    @inlineCallbacks
    def test_addressBookHomeVersion(self):
        """
        The DATAVERSION column for new addressbook homes must match the
        ADDRESSBOOK-DATAVERSION value.
        """
        home = yield self.transactionUnderTest().addressbookHomeWithUID("home_version")
        self.assertTrue(home is not None)
        yield self.transactionUnderTest().commit()
        txn = yield self.transactionUnderTest()
        version = yield txn.calendarserverValue("ADDRESSBOOK-DATAVERSION")
        ch = schema.ADDRESSBOOK_HOME
        homeVersion = (yield Select(
            [ch.DATAVERSION, ],
            From=ch,
            Where=ch.OWNER_UID == "home_version",
        ).on(txn))[0][0]
        self.assertEqual(int(homeVersion), int(version))
@inlineCallbacks
def test_homeProvisioningConcurrency(self):
"""
        Test that two concurrent attempts to provision an addressbook home do not
cause a race-condition whereby the second commit results in a second
C{INSERT} that violates a unique constraint. Also verify that, while
the two provisioning attempts are happening and doing various lock
operations, that we do not block other reads of the table.
"""
addressbookStore = self.store
txn1 = addressbookStore.newTransaction()
txn2 = addressbookStore.newTransaction()
txn3 = addressbookStore.newTransaction()
# Provision one home now - we will use this to later verify we can do
# reads of existing data in the table
home_uid2 = yield txn3.homeWithUID(EADDRESSBOOKTYPE, "uid2", create=True)
self.assertNotEqual(home_uid2, None)
yield txn3.commit()
home_uid1_1 = yield txn1.homeWithUID(
EADDRESSBOOKTYPE, "uid1", create=True
)
@inlineCallbacks
def _defer_home_uid1_2():
home_uid1_2 = yield txn2.homeWithUID(
EADDRESSBOOKTYPE, "uid1", create=True
)
yield txn2.commit()
returnValue(home_uid1_2)
d1 = _defer_home_uid1_2()
@inlineCallbacks
def _pause_home_uid1_1():
yield deferLater(reactor, 1.0, lambda : None)
yield txn1.commit()
d2 = _pause_home_uid1_1()
# Verify that we can still get to the existing home - i.e. the lock
# on the table allows concurrent reads
txn4 = addressbookStore.newTransaction()
home_uid2 = yield txn4.homeWithUID(EADDRESSBOOKTYPE, "uid2", create=True)
self.assertNotEqual(home_uid2, None)
yield txn4.commit()
# Now do the concurrent provision attempt
yield d2
home_uid1_2 = yield d1
self.assertNotEqual(home_uid1_1, None)
self.assertNotEqual(home_uid1_2, None)
@inlineCallbacks
def test_putConcurrency(self):
"""
Test that two concurrent attempts to PUT different address book object resources to the
        same address book home do not cause a deadlock.
"""
addressbookStore = self.store
# Provision the home and addressbook now
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
adbk = yield home.addressbookWithName("addressbook")
self.assertNotEqual(adbk, None)
yield txn.commit()
txn1 = addressbookStore.newTransaction()
txn2 = addressbookStore.newTransaction()
home1 = yield txn1.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
home2 = yield txn2.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
adbk1 = yield home1.addressbookWithName("addressbook")
adbk2 = yield home2.addressbookWithName("addressbook")
@inlineCallbacks
def _defer1():
yield adbk1.createAddressBookObjectWithName("1.vcf", VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default1;;;
FN:Default1 Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:[email protected]
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid1
END:VCARD
""".replace("\n", "\r\n")
))
yield txn1.commit() # FIXME: CONCURRENT
d1 = _defer1()
@inlineCallbacks
def _defer2():
yield adbk2.createAddressBookObjectWithName("2.vcf", VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default2;;;
FN:Default2 Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:[email protected]
TEL;type=WORK;type=pref:1-555-555-5556
TEL;type=CELL:1-444-444-4445
item1.ADR;type=WORK;type=pref:;;1234 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid2
END:VCARD
""".replace("\n", "\r\n")
))
yield txn2.commit() # FIXME: CONCURRENT
d2 = _defer2()
yield d1
yield d2
@inlineCallbacks
def test_notificationsProvisioningConcurrency(self):
"""
Test that two concurrent attempts to provision a notifications collection do not
cause a race-condition whereby the second commit results in a second
C{INSERT} that violates a unique constraint.
"""
addressbookStore = self.store
txn1 = addressbookStore.newTransaction()
txn2 = addressbookStore.newTransaction()
notification_uid1_1 = yield txn1.notificationsWithUID(
"uid1",
)
@inlineCallbacks
def _defer_notification_uid1_2():
notification_uid1_2 = yield txn2.notificationsWithUID(
"uid1",
)
yield txn2.commit()
returnValue(notification_uid1_2)
d1 = _defer_notification_uid1_2()
@inlineCallbacks
def _pause_notification_uid1_1():
yield deferLater(reactor, 1.0, lambda : None)
yield txn1.commit()
d2 = _pause_notification_uid1_1()
# Now do the concurrent provision attempt
yield d2
notification_uid1_2 = yield d1
self.assertNotEqual(notification_uid1_1, None)
self.assertNotEqual(notification_uid1_2, None)
@inlineCallbacks
def test_addressbookObjectUID(self):
"""
        Test that the vCard UID property is stored correctly in the database.
"""
addressbookStore = self.store
# Provision the home and addressbook, one user and one group
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
adbk = yield home.addressbookWithName("addressbook")
self.assertNotEqual(adbk, None)
person = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default;;;
FN:Default Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:[email protected]
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid1
END:VCARD
""".replace("\n", "\r\n")
)
self.assertEqual(person.resourceUID(), "uid1")
abObject = yield adbk.createAddressBookObjectWithName("1.vcf", person)
self.assertEqual(abObject.uid(), "uid1")
yield txn.commit()
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
adbk = yield home.addressbookWithName("addressbook")
abObject = yield adbk.objectResourceWithName("1.vcf")
person = yield abObject.component()
self.assertEqual(person.resourceUID(), "uid1")
yield home.removeAddressBookWithName("addressbook")
yield txn.commit()
@inlineCallbacks
def test_addressbookObjectKind(self):
"""
        Test that the vCard kind (X-ADDRESSBOOKSERVER-KIND) is stored correctly in the database.
"""
addressbookStore = self.store
# Provision the home and addressbook, one user and one group
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
adbk = yield home.addressbookWithName("addressbook")
self.assertNotEqual(adbk, None)
person = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default;;;
FN:Default Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:[email protected]
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid1
END:VCARD
""".replace("\n", "\r\n")
)
self.assertEqual(person.resourceKind(), None)
abObject = yield adbk.createAddressBookObjectWithName("p.vcf", person)
self.assertEqual(abObject.kind(), _ABO_KIND_PERSON)
group = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
UID:uid2
FN:Top Group
N:Top Group;;;;
REV:20120503T194243Z
X-ADDRESSBOOKSERVER-KIND:group
X-ADDRESSBOOKSERVER-MEMBER:urn:uuid:uid1
END:VCARD
""".replace("\n", "\r\n")
)
        self.assertEqual(group.resourceKind(), "group")
abObject = yield adbk.createAddressBookObjectWithName("g.vcf", group)
self.assertEqual(abObject.kind(), _ABO_KIND_GROUP)
badgroup = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
UID:uid3
FN:Bad Group
N:Bad Group;;;;
REV:20120503T194243Z
X-ADDRESSBOOKSERVER-KIND:badgroup
X-ADDRESSBOOKSERVER-MEMBER:urn:uuid:uid1
END:VCARD
""".replace("\n", "\r\n")
)
        self.assertEqual(badgroup.resourceKind(), "badgroup")
abObject = yield adbk.createAddressBookObjectWithName("bg.vcf", badgroup)
self.assertEqual(abObject.kind(), _ABO_KIND_PERSON)
yield txn.commit()
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
adbk = yield home.addressbookWithName("addressbook")
abObject = yield adbk.objectResourceWithName("p.vcf")
person = yield abObject.component()
self.assertEqual(person.resourceKind(), None)
self.assertEqual(abObject.kind(), _ABO_KIND_PERSON)
abObject = yield adbk.objectResourceWithName("g.vcf")
group = yield abObject.component()
self.assertEqual(group.resourceKind(), "group")
self.assertEqual(abObject.kind(), _ABO_KIND_GROUP)
abObject = yield adbk.objectResourceWithName("bg.vcf")
badgroup = yield abObject.component()
self.assertEqual(badgroup.resourceKind(), "badgroup")
self.assertEqual(abObject.kind(), _ABO_KIND_PERSON)
yield home.removeAddressBookWithName("addressbook")
yield txn.commit()
@inlineCallbacks
def test_addressbookObjectMembers(self):
"""
        Test that vCard group membership rows are stored correctly in the database.
"""
addressbookStore = self.store
yield cleanStore(self, addressbookStore)
# Provision the home and addressbook, one user and one group
txn = addressbookStore.newTransaction()
home = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(home, None)
adbk = yield home.addressbookWithName("addressbook")
self.assertNotEqual(adbk, None)
person = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
N:Thompson;Default;;;
FN:Default Thompson
EMAIL;type=INTERNET;type=WORK;type=pref:[email protected]
TEL;type=WORK;type=pref:1-555-555-5555
TEL;type=CELL:1-444-444-4444
item1.ADR;type=WORK;type=pref:;;1245 Test;Sesame Street;California;11111;USA
item1.X-ABADR:us
UID:uid1
END:VCARD
""".replace("\n", "\r\n")
)
self.assertEqual(person.resourceKind(), None)
personObject = yield adbk.createAddressBookObjectWithName("p.vcf", person)
group = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
UID:uid2
FN:Top Group
N:Top Group;;;;
REV:20120503T194243Z
X-ADDRESSBOOKSERVER-KIND:group
X-ADDRESSBOOKSERVER-MEMBER:urn:uuid:uid3
END:VCARD
""".replace("\n", "\r\n")
)
groupObject = yield adbk.createAddressBookObjectWithName("g.vcf", group)
aboMembers = schema.ABO_MEMBERS
memberRows = yield Select([aboMembers.GROUP_ID, aboMembers.MEMBER_ID], From=aboMembers, Where=aboMembers.REMOVED == False).on(txn)
self.assertEqual(memberRows, [])
aboForeignMembers = schema.ABO_FOREIGN_MEMBERS
foreignMemberRows = yield Select([aboForeignMembers.GROUP_ID, aboForeignMembers.MEMBER_ADDRESS], From=aboForeignMembers).on(txn)
self.assertEqual(foreignMemberRows, [[groupObject._resourceID, "urn:uuid:uid3"]])
subgroup = VCard.fromString(
"""BEGIN:VCARD
VERSION:3.0
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
UID:uid3
FN:Sub Group
N:Sub Group;;;;
REV:20120503T194243Z
X-ADDRESSBOOKSERVER-KIND:group
X-ADDRESSBOOKSERVER-MEMBER:urn:uuid:uid1
END:VCARD
""".replace("\n", "\r\n")
)
subgroupObject = yield adbk.createAddressBookObjectWithName("sg.vcf", subgroup)
memberRows = yield Select([aboMembers.GROUP_ID, aboMembers.MEMBER_ID], From=aboMembers, Where=aboMembers.REMOVED == False).on(txn)
self.assertEqual(sorted(memberRows), sorted([
[groupObject._resourceID, subgroupObject._resourceID],
[subgroupObject._resourceID, personObject._resourceID],
]))
foreignMemberRows = yield Select([aboForeignMembers.GROUP_ID, aboForeignMembers.MEMBER_ADDRESS], From=aboForeignMembers).on(txn)
self.assertEqual(foreignMemberRows, [])
yield subgroupObject.remove()
memberRows = yield Select([aboMembers.GROUP_ID, aboMembers.MEMBER_ID, aboMembers.REMOVED, aboMembers.REVISION], From=aboMembers).on(txn)
# combine by groupID
groupIDToMemberRowMap = {}
for groupID, id, removed, version in memberRows:
memberRow = groupIDToMemberRowMap.get(groupID, [])
memberRow.append((id, removed, version))
groupIDToMemberRowMap[groupID] = memberRow
# see if this object is in current version
groupIDs = set([
groupID for groupID, memberIDRemovedRevisionRows in groupIDToMemberRowMap.iteritems()
if AddressBook._currentMemberIDsFromMemberIDRemovedRevisionRows(memberIDRemovedRevisionRows)
])
self.assertEqual(len(groupIDs), 0)
foreignMemberRows = yield Select(
[aboForeignMembers.GROUP_ID, aboForeignMembers.MEMBER_ADDRESS], From=aboForeignMembers,
).on(txn)
self.assertEqual(foreignMemberRows, [[groupObject._resourceID, "urn:uuid:uid3"]])
yield home.removeAddressBookWithName("addressbook")
yield txn.commit()
@inlineCallbacks
def test_removeAddressBookPropertiesOnDelete(self):
"""
L{IAddressBookHome.removeAddressBookWithName} clears an address book that already
exists and makes sure added properties are also removed.
"""
prop = schema.RESOURCE_PROPERTY
_allWithID = Select([prop.NAME, prop.VIEWER_UID, prop.VALUE],
From=prop,
Where=prop.RESOURCE_ID == Parameter("resourceID"))
# Create address book and add a property
home = yield self.homeUnderTest()
addressbook = home.addressbook()
resourceID = home._addressbookPropertyStoreID
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
addressbookProperties = addressbook.properties()
prop = carddavxml.AddressBookDescription.fromString("Address Book prop to be removed")
addressbookProperties[PropertyName.fromElement(prop)] = prop
yield self.commit()
# Check that two properties are present
home = yield self.homeUnderTest()
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 1)
yield self.commit()
# Remove address book and check for no properties
home = yield self.homeUnderTest()
yield home.removeAddressBookWithName(addressbook.name())
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
yield self.commit()
# Recheck it
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
yield self.commit()
@inlineCallbacks
def test_removeAddressBookObjectPropertiesOnDelete(self):
"""
L{IAddressBookHome.removeAddressBookWithName} removes an address book object that already
exists and makes sure properties are also removed (which is always the case as right
now address book objects never have properties).
"""
# Create address book object
adbk1 = yield self.addressbookUnderTest()
name = "4.vcf"
component = VComponent.fromString(vcard4_text)
addressobject = yield adbk1.createAddressBookObjectWithName(name, component, options={})
resourceID = addressobject._resourceID
prop = schema.RESOURCE_PROPERTY
_allWithID = Select([prop.NAME, prop.VIEWER_UID, prop.VALUE],
From=prop,
Where=prop.RESOURCE_ID == Parameter("resourceID"))
# No properties on existing address book object
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
yield self.commit()
# Remove address book object and check for no properties
adbk1 = yield self.addressbookUnderTest()
obj1 = yield adbk1.addressbookObjectWithName(name)
yield obj1.remove()
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
yield self.commit()
# Recheck it
rows = yield _allWithID.on(self.transactionUnderTest(), resourceID=resourceID)
self.assertEqual(len(tuple(rows)), 0)
yield self.commit()
@inlineCallbacks
def test_directShareCreateConcurrency(self):
"""
Test that two concurrent attempts to create a direct shared addressbook
work concurrently without an exception.
"""
addressbookStore = self.store
# Provision the home and addressbook now
txn = addressbookStore.newTransaction()
sharerHome = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(sharerHome, None)
ab = yield sharerHome.addressbookWithName("addressbook")
self.assertNotEqual(ab, None)
shareeHome = yield txn.homeWithUID(EADDRESSBOOKTYPE, "uid2", create=True)
self.assertNotEqual(shareeHome, None)
yield txn.commit()
txn1 = addressbookStore.newTransaction()
txn2 = addressbookStore.newTransaction()
sharerHome1 = yield txn1.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(sharerHome1, None)
ab1 = yield sharerHome1.addressbookWithName("addressbook")
self.assertNotEqual(ab1, None)
shareeHome1 = yield txn1.homeWithUID(EADDRESSBOOKTYPE, "uid2", create=True)
self.assertNotEqual(shareeHome1, None)
sharerHome2 = yield txn2.homeWithUID(EADDRESSBOOKTYPE, "uid1", create=True)
self.assertNotEqual(sharerHome2, None)
ab2 = yield sharerHome2.addressbookWithName("addressbook")
self.assertNotEqual(ab2, None)
shareeHome2 = yield txn1.homeWithUID(EADDRESSBOOKTYPE, "uid2", create=True)
self.assertNotEqual(shareeHome2, None)
@inlineCallbacks
def _defer1():
yield ab1.directShareWithUser("uid2")
yield txn1.commit()
d1 = _defer1()
@inlineCallbacks
def _defer2():
yield ab2.directShareWithUser("uid1")
yield txn2.commit()
d2 = _defer2()
yield d1
yield d2
@inlineCallbacks
def test_resourceLock(self):
"""
        Test CommonObjectResource.lock to make sure it locks, raises on a missing
        resource, and raises when already locked and wait=False is used.
"""
# Valid object
resource = yield self.addressbookObjectUnderTest()
# Valid lock
yield resource.lock()
self.assertTrue(resource._locked)
# Setup a new transaction to verify the lock and also verify wait behavior
newTxn = self.store.newTransaction()
newResource = yield self.addressbookObjectUnderTest(txn=newTxn)
try:
yield newResource.lock(wait=False)
except:
pass # OK
else:
self.fail("Expected an exception")
self.assertFalse(newResource._locked)
yield newTxn.abort()
        # Commit existing transaction and verify we can get the lock using a new transaction
yield self.commit()
resource = yield self.addressbookObjectUnderTest()
yield resource.lock()
self.assertTrue(resource._locked)
# Setup a new transaction to verify the lock but pass in an alternative txn directly
newTxn = self.store.newTransaction()
# FIXME: not sure why, but without this statement here, this portion of the test fails in a funny way.
# Basically the query in the try block seems to execute twice, failing each time, one of which is caught,
# and the other not - causing the test to fail. Seems like some state on newTxn is not being initialized?
yield self.addressbookObjectUnderTest(txn=newTxn, name="2.vcf")
try:
yield resource.lock(wait=False, useTxn=newTxn)
except:
pass # OK
else:
self.fail("Expected an exception")
self.assertTrue(resource._locked)
# Test missing resource
resource2 = yield self.addressbookObjectUnderTest(name="2.vcf")
resource2._resourceID = 123456789
try:
yield resource2.lock()
except NoSuchObjectResourceError:
pass # OK
except:
self.fail("Expected a NoSuchObjectResourceError exception")
else:
self.fail("Expected an exception")
self.assertFalse(resource2._locked)
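    @inlineCallbacks
    def _lock_pattern_sketch(self):
        # Illustrative sketch, not a test case: the locking pattern exercised by
        # the assertions above. The row lock is taken inside the current
        # transaction and released by commit/abort; wait=False raises right away
        # if another transaction already holds the lock. The method name is
        # hypothetical.
        resource = yield self.addressbookObjectUnderTest()
        yield resource.lock(wait=False)
        self.assertTrue(resource._locked)
        yield self.commit()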
@inlineCallbacks
def test_loadObjectResourcesWithName(self):
"""
L{CommonHomeChild.objectResourcesWithNames} returns the correct set of object resources
        properly configured with a loaded property store. Make sure batching works.
"""
@inlineCallbacks
def _tests(ab):
resources = yield ab.objectResourcesWithNames(("1.vcf",))
self.assertEqual(set([resource.name() for resource in resources]), set(("1.vcf",)))
resources = yield ab.objectResourcesWithNames(("1.vcf", "2.vcf",))
self.assertEqual(set([resource.name() for resource in resources]), set(("1.vcf", "2.vcf",)))
resources = yield ab.objectResourcesWithNames(("1.vcf", "2.vcf", "3.vcf",))
self.assertEqual(set([resource.name() for resource in resources]), set(("1.vcf", "2.vcf", "3.vcf",)))
resources = yield ab.objectResourcesWithNames(("bogus1.vcf",))
self.assertEqual(set([resource.name() for resource in resources]), set())
resources = yield ab.objectResourcesWithNames(("bogus1.vcf", "2.vcf",))
self.assertEqual(set([resource.name() for resource in resources]), set(("2.vcf",)))
# Basic load tests
ab = yield self.addressbookUnderTest()
yield _tests(ab)
# Adjust batch size and try again
self.patch(CommonObjectResource, "BATCH_LOAD_SIZE", 2)
yield _tests(ab)
yield self.commit()
@inlineCallbacks
def test_objectResourceWithID(self):
"""
        L{IAddressBookHome.objectResourceWithID} will return the addressbook object.
"""
home = yield self.homeUnderTest()
addressbookObject = (yield home.objectResourceWithID(9999))
self.assertEquals(addressbookObject, None)
obj = (yield self.addressbookObjectUnderTest())
addressbookObject = (yield home.objectResourceWithID(obj._resourceID))
self.assertNotEquals(addressbookObject, None)
|
trevor/calendarserver
|
txdav/carddav/datastore/test/test_sql.py
|
Python
|
apache-2.0
| 32,730 |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
"""use directly from flow data"""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
from tensorflow.python.ops import init_ops
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def sample_gumbel(shape, eps=1e-20):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape,minval=0,maxval=1)
return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
return tf.nn.softmax( y / temperature)
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
#k = tf.shape(logits)[-1]
#y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,1,keep_dims=True)),y.dtype)
y = tf.stop_gradient(y_hard - y) + y
return y
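# Illustrative sketch (not part of the original model): drawing straight-through
# one-hot samples with gumbel_softmax as defined above. Assumes TF 1.x graph
# mode, consistent with the rest of this file; the helper name is hypothetical.
def _gumbel_softmax_demo(temperature=0.5):
  logits = tf.constant([[2.0, 0.5, 0.1], [0.1, 0.2, 3.0]])
  samples = gumbel_softmax(logits, temperature, hard=True)
  with tf.Session() as sess:
    # Each row comes out (approximately) one-hot; the straight-through trick
    # above keeps `samples` differentiable with respect to `logits`.
    return sess.run(samples)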
def construct_model(image):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
cdna: True to use Convoluational Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
batch_size, img_height, img_width, color_channels = image.get_shape()[0:4]
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 128, 256, 256, 512, 256, 128, 64, 32]))
#############################
#128*128
enc0_s = slim.layers.conv2d(
image,
32, [5, 5],
stride=2,
scope='scale1_conv1_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1_s'})
hidden1_s = slim.layers.conv2d(
enc0_s,
lstm_size[0], [5, 5],
stride=1,
scope='state1_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm2_s'})
hidden2_s = slim.layers.conv2d(
hidden1_s,
lstm_size[1], [5, 5],
stride=1,
scope='state2_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm3_s'})
#64*64
enc1_s = slim.layers.conv2d(
hidden2_s, hidden2_s.get_shape()[3], [3, 3], stride=2, scope='conv2_s')
hidden3_s = slim.layers.conv2d(
enc1_s,
lstm_size[2], [5, 5],
stride=1,
scope='state3_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm4_s'})
hidden4_s = slim.layers.conv2d(
hidden3_s,
lstm_size[3], [5, 5],
stride=1,
scope='state4_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm5_s'})
#32*32
enc2_s = slim.layers.conv2d(
hidden4_s, hidden4_s.get_shape()[3], [3, 3], stride=2, scope='conv3_s')
hidden5_s = slim.layers.conv2d(
enc2_s,
lstm_size[4], [5, 5],
stride=1,
scope='state5_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm6_s'})
hidden6_s = slim.layers.conv2d(
hidden5_s,
lstm_size[5], [5, 5],
stride=1,
scope='state6_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm7_s'})
#16*16
enc3_s = slim.layers.conv2d(
hidden6_s, hidden6_s.get_shape()[3], [3, 3], stride=2, scope='conv4_s')
hidden7_s = slim.layers.conv2d(
enc3_s,
lstm_size[6], [5, 5],
stride=1,
scope='state7_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm8_s'})
hidden8_s = slim.layers.conv2d(
hidden7_s,
lstm_size[7], [5, 5],
stride=1,
scope='state8_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9_s'})
#8*8
enc4_s = slim.layers.conv2d(
hidden8_s, hidden8_s.get_shape()[3], [3, 3], stride=2, scope='conv5_s')
enc5_s = slim.layers.conv2d(
enc4_s, hidden8_s.get_shape()[3], [1, 1], stride=1, scope='conv6_s')
hidden9_s = slim.layers.conv2d(
enc5_s,
lstm_size[8], [5, 5],
stride=1,
scope='state9_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm10_s'})
#16*16
enc6_s = slim.layers.conv2d_transpose(
hidden9_s, hidden9_s.get_shape()[3], 4, stride=2, scope='convt1_s')
hidden10_s = slim.layers.conv2d(
enc6_s,
lstm_size[9], [5, 5],
stride=1,
scope='state10_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm11_s'})
# Skip connection.
hidden10_s = tf.concat(axis=3, values=[hidden10_s, enc3_s]) # both 16x16
#32*32
enc7_s = slim.layers.conv2d_transpose(
hidden10_s, hidden10_s.get_shape()[3], 4, stride=2, scope='convt2_s')
hidden11_s = slim.layers.conv2d(
enc7_s,
lstm_size[10], [5, 5],
stride=1,
scope='state11_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm12_s'})
# Skip connection.
hidden11_s= tf.concat(axis=3, values=[hidden11_s, enc2_s]) # both 32x32
#64*64
enc8_s = slim.layers.conv2d_transpose(
hidden11_s, hidden11_s.get_shape()[3], 4, stride=2, scope='convt3_s')
hidden12_s = slim.layers.conv2d(
enc8_s,
lstm_size[11], [5, 5],
stride=1,
scope='state12_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm13_s'})
# Skip connection.
hidden12_s= tf.concat(axis=3, values=[hidden12_s, enc1_s]) # both 64x64
#128*128
enc9_s = slim.layers.conv2d_transpose(
hidden12_s, hidden12_s.get_shape()[3], 4, stride=2, scope='convt4_s')
hidden13_s = slim.layers.conv2d(
enc9_s,
lstm_size[12], [5, 5],
stride=1,
scope='state13_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm14_s'})
# Skip connection.
hidden13_s= tf.concat(axis=3, values=[hidden13_s, enc0_s]) # both 128x128
enc10_s = slim.layers.conv2d_transpose(
hidden13_s,
hidden13_s.get_shape()[3], 4, stride=2, scope='convt5_s',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm15_s'})
masks_h = slim.layers.conv2d_transpose(
enc10_s, 2, 1, stride=1, scope='convt6_s')#, biases_initializer=tf.constant_initializer([0.0, 1.0]))
masks_probs_h = tf.nn.softmax(tf.reshape(masks_h, [-1, 2]))
#entropy_losses.append(tf.reduce_mean(-tf.reduce_sum(masks_probs * tf.log(masks_probs + 1e-10), [1])))
masks_h = tf.reshape(
masks_probs_h,
#gumbel_softmax(tf.reshape(masks, [-1, num_masks]), TEMP, hard=False),
[int(batch_size), int(img_height), int(img_width), 2])
edge_mask_h = tf.split(axis=3, num_or_size_splits=2, value=masks_h)[0]
# masks_v = slim.layers.conv2d_transpose(
# enc10_s, 2, 1, stride=1, scope='convt7_s')#, biases_initializer=tf.constant_initializer([0.0, 1.0]))
# masks_probs_v = tf.nn.softmax(tf.reshape(masks_v, [-1, 2]))
# #entropy_losses.append(tf.reduce_mean(-tf.reduce_sum(masks_probs * tf.log(masks_probs + 1e-10), [1])))
# masks_v = tf.reshape(
# masks_probs_v,
# #gumbel_softmax(tf.reshape(masks, [-1, num_masks]), TEMP, hard=False),
# [int(batch_size), int(img_height), int(img_width), 2])
# edge_mask_v = tf.split(axis=3, num_or_size_splits=2, value=masks_v)[0]
return edge_mask_h
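# Illustrative sketch (not part of the original model): wiring construct_model
# into a graph. The batch size and the 128x128 input resolution are assumptions
# suggested by the layer comments above; the helper name is hypothetical.
def _construct_model_demo():
  image = tf.placeholder(tf.float32, shape=[8, 128, 128, 3])
  # The mask comes back at the input resolution: [8, 128, 128, 1] softmax
  # probabilities for the predicted edge mask.
  edge_mask = construct_model(image)
  return image, edge_mask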
|
wangyang59/tf_models
|
video_prediction/prediction_model_flo_edge.py
|
Python
|
apache-2.0
| 9,575 |
import random
from base64 import b64encode, b64decode
import mock
from six.moves.urllib.parse import parse_qs, urlparse
from pyramid.httpexceptions import HTTPBadRequest
from kinto.core.utils import json
from . import BaseTest
class PaginationTest(BaseTest):
def setUp(self):
super(PaginationTest, self).setUp()
self.patch_known_field.start()
indices = list(range(20))
random.shuffle(indices)
for i in indices:
record = {
'title': 'MoFo #{0:02}'.format(i),
'status': i % 4,
'unread': (i % 2 == 0)
}
self.model.create_record(record)
def _setup_next_page(self):
next_page = self.last_response.headers['Next-Page']
url_fragments = urlparse(next_page)
queryparams = parse_qs(url_fragments.query)
self.resource.request.GET['_token'] = queryparams['_token'][0]
self.resource.request.GET['_limit'] = queryparams['_limit'][0]
self.last_response.headers = {}
return queryparams
def test_return_data(self):
result = self.resource.collection_get()
self.assertEqual(len(result['data']), 20)
def test_handle_limit(self):
self.resource.request.GET = {'_limit': '10'}
result = self.resource.collection_get()
self.assertEqual(len(result['data']), 10)
def test_handle_forced_limit(self):
with mock.patch.dict(self.resource.request.registry.settings, [
('paginate_by', 10)]):
result = self.resource.collection_get()
self.assertEqual(len(result['data']), 10)
def test_forced_limit_has_precedence_over_provided_limit(self):
with mock.patch.dict(self.resource.request.registry.settings, [
('paginate_by', 5)]):
self.resource.request.GET = {'_limit': '10'}
result = self.resource.collection_get()
self.assertEqual(len(result['data']), 5)
def test_return_next_page_url_is_given_in_headers(self):
self.resource.request.GET = {'_limit': '10'}
self.resource.collection_get()
self.assertIn('Next-Page', self.last_response.headers)
def test_next_page_url_has_got_querystring(self):
self.resource.request.GET = {'_limit': '10'}
self.resource.collection_get()
queryparams = self._setup_next_page()
self.assertIn('_limit', queryparams)
self.assertIn('_token', queryparams)
def test_next_page_url_gives_distinct_records(self):
self.resource.request.GET = {'_limit': '10'}
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
results_id1 = set([x['id'] for x in results1['data']])
results_id2 = set([x['id'] for x in results2['data']])
self.assertFalse(results_id1.intersection(results_id2))
def test_next_page_url_gives_distinct_records_with_forced_limit(self):
with mock.patch.dict(self.resource.request.registry.settings, [
('paginate_by', 5)]):
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
results_id1 = set([x['id'] for x in results1['data']])
results_id2 = set([x['id'] for x in results2['data']])
self.assertFalse(results_id1.intersection(results_id2))
def test_twice_the_same_next_page(self):
self.resource.request.GET = {'_limit': '10'}
self.resource.collection_get()
first_next = self.last_response.headers['Next-Page']
self.resource.collection_get()
second_next = self.last_response.headers['Next-Page']
self.assertEqual(first_next, second_next)
def test_stops_giving_next_page_at_the_end_of_first_page(self):
self.resource.collection_get()
self.assertNotIn('Next-Page', self.last_response.headers)
def test_stops_giving_next_page_at_the_end_sets(self):
self.resource.request.GET = {'_limit': '11'}
self.resource.collection_get()
self._setup_next_page()
self.resource.collection_get()
self.assertNotIn('Next-Page', self.last_response.headers)
def test_stops_giving_next_page_at_the_end_sets_on_exact_limit(self):
self.resource.request.GET = {'_limit': '10'}
self.resource.collection_get()
self._setup_next_page()
self.resource.collection_get()
self.assertNotIn('Next-Page', self.last_response.headers)
def test_handle_simple_sorting(self):
self.resource.request.GET = {'_sort': '-status', '_limit': '20'}
expected_results = self.resource.collection_get()
self.resource.request.GET['_limit'] = '10'
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
self.assertEqual(expected_results['data'],
results1['data'] + results2['data'])
def test_handle_multiple_sorting(self):
self.resource.request.GET = {'_sort': '-status,title', '_limit': '20'}
expected_results = self.resource.collection_get()
self.resource.request.GET['_limit'] = '10'
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
self.assertEqual(expected_results['data'],
results1['data'] + results2['data'])
def test_handle_filtering_sorting(self):
self.resource.request.GET = {'_sort': '-status,title', 'status': '2',
'_limit': '20'}
expected_results = self.resource.collection_get()
self.resource.request.GET['_limit'] = '3'
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
self.assertEqual(expected_results['data'],
results1['data'] + results2['data'])
def test_handle_sorting_desc(self):
self.resource.request.GET = {'_sort': 'status,-title', '_limit': '20'}
expected_results = self.resource.collection_get()
self.resource.request.GET['_limit'] = '10'
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
self.assertEqual(expected_results['data'],
results1['data'] + results2['data'])
def test_handle_since(self):
self.resource.request.GET = {'_since': '123', '_limit': '20'}
expected_results = self.resource.collection_get()
self.resource.request.GET['_limit'] = '10'
results1 = self.resource.collection_get()
self._setup_next_page()
results2 = self.resource.collection_get()
self.assertEqual(expected_results['data'],
results1['data'] + results2['data'])
def test_wrong_limit_raise_400(self):
self.resource.request.GET = {'_since': '123', '_limit': 'toto'}
self.assertRaises(HTTPBadRequest, self.resource.collection_get)
def test_token_wrong_base64(self):
self.resource.request.GET = {'_since': '123', '_limit': '20',
'_token': '123'}
self.assertRaises(HTTPBadRequest, self.resource.collection_get)
def test_token_wrong_json(self):
self.resource.request.GET = {
'_since': '123', '_limit': '20',
'_token': b64encode('{"toto":'.encode('ascii')).decode('ascii')}
self.assertRaises(HTTPBadRequest, self.resource.collection_get)
def test_token_wrong_json_fields(self):
badtoken = '{"toto": {"tutu": 1}}'
self.resource.request.GET = {
'_since': '123', '_limit': '20',
'_token': b64encode(badtoken.encode('ascii')).decode('ascii')}
self.assertRaises(HTTPBadRequest, self.resource.collection_get)
def test_raises_bad_request_if_token_has_bad_data_structure(self):
invalid_token = json.dumps([[('last_modified', 0, '>')]])
self.resource.request.GET = {
'_since': '123', '_limit': '20',
'_token': b64encode(invalid_token.encode('ascii')).decode('ascii')}
self.assertRaises(HTTPBadRequest, self.resource.collection_get)
class BuildPaginationTokenTest(BaseTest):
def setUp(self):
super(BuildPaginationTokenTest, self).setUp()
self.patch_known_field.start()
self.record = {
'id': 1, 'status': 2, 'unread': True,
'last_modified': 1234, 'title': 'Title'
}
def test_token_contains_current_offset(self):
token = self.resource._build_pagination_token([('last_modified', -1)],
self.record,
42)
tokeninfo = json.loads(b64decode(token).decode('ascii'))
self.assertEqual(tokeninfo['offset'], 42)
def test_no_sorting_default_to_modified_field(self):
token = self.resource._build_pagination_token([('last_modified', -1)],
self.record,
42)
tokeninfo = json.loads(b64decode(token).decode('ascii'))
self.assertDictEqual(tokeninfo['last_record'],
{"last_modified": 1234})
def test_sorting_handle_both_rules(self):
token = self.resource._build_pagination_token([
('status', -1),
('last_modified', -1)
], self.record, 34)
tokeninfo = json.loads(b64decode(token).decode('ascii'))
self.assertDictEqual(tokeninfo['last_record'],
{"last_modified": 1234, "status": 2})
def test_sorting_handle_ordering_direction(self):
token = self.resource._build_pagination_token([
('status', 1),
('last_modified', 1)
], self.record, 32)
tokeninfo = json.loads(b64decode(token).decode('ascii'))
self.assertEqual(tokeninfo['last_record'],
{"last_modified": 1234, "status": 2})
def test_multiple_sorting_keep_all(self):
token = self.resource._build_pagination_token([
('status', 1),
('title', -1),
('last_modified', -1)
], self.record, 31)
tokeninfo = json.loads(b64decode(token).decode('ascii'))
self.assertEqual(tokeninfo['last_record'],
{"last_modified": 1234, "status": 2,
'title': 'Title'})
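# Illustrative sketch (not part of the test suite): the tests above treat the
# pagination token as base64 of a JSON object carrying 'last_record' and
# 'offset'. A small helper like this (hypothetical name) is enough to inspect
# one, mirroring what the assertions do inline.
def decode_pagination_token(token):
    return json.loads(b64decode(token).decode('ascii'))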
|
monikagrabowska/osf.io
|
kinto/tests/core/resource/test_pagination.py
|
Python
|
apache-2.0
| 10,656 |
# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI Routers for the Resource service."""
from keystone.common import router
from keystone.common import wsgi
from keystone.resource import controllers
class Routers(wsgi.RoutersBase):
def append_v3_routers(self, mapper, routers):
routers.append(
router.Router(controllers.Domain(),
'domains', 'domain',
resource_descriptions=self.v3_resources))
routers.append(
router.Router(controllers.Project(),
'projects', 'project',
resource_descriptions=self.v3_resources))
|
darren-wang/ks3
|
keystone/resource/routers.py
|
Python
|
apache-2.0
| 1,239 |
# general
import time
# peachbox imports
from peachbox.task import ScheduledTask
from peachbox.connector import sink, source
from peachbox.pipeline import Chain, Validator
# tutorial
from pipelines.importer import UserReviewEdge, ProductReviewEdge, ReviewProperties
import model.master
class ImportMovieReviews(ScheduledTask):
def __init__(self):
super(ImportMovieReviews, self).__init__()
self.source = source.KafkaJSON(topic='movie_reviews')
self.sink = sink.MasterData()
def execute(self):
input = self.source.emit()['data']
# Import 'review by user edges'
user_review_validator = Validator(['time', 'user_id', 'product_id'])
user_review_chain = Chain([user_review_validator, UserReviewEdge()])
user_review_edges = user_review_chain.execute(input)
# Import 'product review edges'
product_review_validator = Validator(['time', 'user_id', 'product_id'])
product_review_chain = Chain([product_review_validator, ProductReviewEdge()])
product_review_edges = product_review_chain.execute(input)
# Import 'review properties'
required_fields = ['time', 'user_id', 'product_id', 'helpfulness', 'score', 'summary', 'review']
review_property_validator = Validator(required_fields)
review_properties = Chain([review_property_validator, ReviewProperties()]).execute(input)
self.sink.absorb([{'data':user_review_edges, 'model':model.master.UserReviewEdge},
{'data':product_review_edges, 'model':model.master.ProductReviewEdge},
{'data':review_properties, 'model':model.master.ReviewProperties}])
# Payload is sent with 'Finished Event'
self.payload = {'import_finished':int(time.time()), 'latest_kafka_offset':self.source.latest_offset}
|
PeachstoneIO/peachbox
|
tutorials/tutorial_movie_reviews/tasks/importer.py
|
Python
|
apache-2.0
| 1,848 |
# Copyright (c) 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.huawei import constants
from cinder.volume.drivers.huawei import fc_zone_helper
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume.drivers.huawei import rest_client
from cinder.volume.drivers.huawei import smartx
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
huawei_opts = [
cfg.StrOpt('cinder_huawei_conf_file',
default='/etc/cinder/cinder_huawei_conf.xml',
help='The configuration file for the Cinder Huawei '
'driver.')]
CONF = cfg.CONF
CONF.register_opts(huawei_opts)
class HuaweiBaseDriver(driver.VolumeDriver):
def __init__(self, *args, **kwargs):
super(HuaweiBaseDriver, self).__init__(*args, **kwargs)
self.configuration = kwargs.get('configuration')
if not self.configuration:
msg = _('_instantiate_driver: configuration not found.')
raise exception.InvalidInput(reason=msg)
self.configuration.append_config_values(huawei_opts)
self.xml_file_path = self.configuration.cinder_huawei_conf_file
def do_setup(self, context):
"""Instantiate common class and login storage system."""
self.restclient = rest_client.RestClient(self.configuration)
return self.restclient.login()
def check_for_setup_error(self):
"""Check configuration file."""
return huawei_utils.check_conf_file(self.xml_file_path)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
return self.restclient.update_volume_stats()
@utils.synchronized('huawei', external=True)
def create_volume(self, volume):
"""Create a volume."""
opts = huawei_utils.get_volume_params(volume)
smartx_opts = smartx.SmartX().get_smartx_specs_opts(opts)
params = huawei_utils.get_lun_params(self.xml_file_path,
smartx_opts)
pool_name = volume_utils.extract_host(volume['host'],
level='pool')
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name, pools)
if not pool_info:
msg = (_('Error in getting pool information for the pool: %s.')
% pool_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
volume_name = huawei_utils.encode_name(volume['id'])
volume_description = volume['name']
volume_size = huawei_utils.get_volume_size(volume)
LOG.info(_LI(
'Create volume: %(volume)s, size: %(size)s.'),
{'volume': volume_name,
'size': volume_size})
params['pool_id'] = pool_info['ID']
params['volume_size'] = volume_size
params['volume_description'] = volume_description
# Prepare LUN parameters.
lun_param = huawei_utils.init_lun_parameters(volume_name, params)
# Create LUN on the array.
lun_info = self.restclient.create_volume(lun_param)
lun_id = lun_info['ID']
try:
qos = huawei_utils.get_volume_qos(volume)
if qos:
smart_qos = smartx.SmartQos(self.restclient)
smart_qos.create_qos(qos, lun_id)
smartpartition = smartx.SmartPartition(self.restclient)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.restclient)
smartcache.add(opts, lun_id)
except Exception as err:
self._delete_lun_with_check(lun_id)
raise exception.InvalidInput(
reason=_('Create volume error. Because %s.') % err)
return {'provider_location': lun_info['ID'],
'ID': lun_id,
'lun_info': lun_info}
@utils.synchronized('huawei', external=True)
def delete_volume(self, volume):
"""Delete a volume.
Three steps:
Firstly, remove associate from lungroup.
Secondly, remove associate from QoS policy.
Thirdly, remove the lun.
"""
name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location')
LOG.info(_LI('Delete volume: %(name)s, array lun id: %(lun_id)s.'),
{'name': name, 'lun_id': lun_id},)
if lun_id:
if self.restclient.check_lun_exist(lun_id):
qos_id = self.restclient.get_qosid_by_lunid(lun_id)
if qos_id:
self.remove_qos_lun(lun_id, qos_id)
self.restclient.delete_lun(lun_id)
else:
LOG.warning(_LW("Can't find lun %s on the array."), lun_id)
return False
return True
def remove_qos_lun(self, lun_id, qos_id):
lun_list = self.restclient.get_lun_list_in_qos(qos_id)
lun_count = len(lun_list)
if lun_count <= 1:
qos = smartx.SmartQos(self.restclient)
qos.delete_qos(qos_id)
else:
self.restclient.remove_lun_from_qos(lun_id,
lun_list,
qos_id)
def _delete_lun_with_check(self, lun_id):
if lun_id:
if self.restclient.check_lun_exist(lun_id):
qos_id = self.restclient.get_qosid_by_lunid(lun_id)
if qos_id:
self.remove_qos_lun(lun_id, qos_id)
self.restclient.delete_lun(lun_id)
def _is_lun_migration_complete(self, src_id, dst_id):
result = self.restclient.get_lun_migration_task()
found_migration_task = False
if 'data' in result:
for item in result['data']:
if (src_id == item['PARENTID']
and dst_id == item['TARGETLUNID']):
found_migration_task = True
if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']:
return True
if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']:
err_msg = (_('Lun migration error.'))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
if not found_migration_task:
err_msg = (_("Cannot find migration task."))
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return False
def _is_lun_migration_exist(self, src_id, dst_id):
try:
result = self.restclient.get_lun_migration_task()
except Exception:
LOG.error(_LE("Get LUN migration error."))
return False
if 'data' in result:
for item in result['data']:
if (src_id == item['PARENTID']
and dst_id == item['TARGETLUNID']):
return True
return False
def _migrate_lun(self, src_id, dst_id):
try:
self.restclient.create_lun_migration(src_id, dst_id)
def _is_lun_migration_complete():
return self._is_lun_migration_complete(src_id, dst_id)
wait_interval = constants.MIGRATION_WAIT_INTERVAL
huawei_utils.wait_for_condition(self.xml_file_path,
_is_lun_migration_complete,
wait_interval)
# Clean up if migration failed.
except Exception as ex:
raise exception.VolumeBackendAPIException(data=ex)
finally:
if self._is_lun_migration_exist(src_id, dst_id):
self.restclient.delete_lun_migration(src_id, dst_id)
self._delete_lun_with_check(dst_id)
LOG.debug("Migrate lun %s successfully.", src_id)
return True
def _wait_volume_ready(self, lun_id):
event_type = 'LUNReadyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
def _volume_ready():
result = self.restclient.get_lun_info(lun_id)
if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
return True
return False
huawei_utils.wait_for_condition(self.xml_file_path,
_volume_ready,
wait_interval,
wait_interval * 10)
def _get_original_status(self, volume):
if not volume['volume_attachment']:
return 'available'
else:
return 'in-use'
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status=None):
original_name = huawei_utils.encode_name(volume['id'])
current_name = huawei_utils.encode_name(new_volume['id'])
lun_id = self.restclient.get_volume_by_name(current_name)
try:
self.restclient.rename_lun(lun_id, original_name)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Unable to rename lun %s on array.'), current_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
LOG.debug("Rename lun from %(current_name)s to %(original_name)s "
"successfully.",
{'current_name': current_name,
'original_name': original_name})
model_update = {'_name_id': None}
return model_update
def migrate_volume(self, ctxt, volume, host, new_type=None):
"""Migrate a volume within the same array."""
return self._migrate_volume(volume, host, new_type)
def _check_migration_valid(self, host, volume):
if 'pool_name' not in host['capabilities']:
return False
target_device = host['capabilities']['location_info']
# Source and destination should be on same array.
if target_device != self.restclient.device_id:
return False
# Same protocol should be used if volume is in-use.
protocol = huawei_utils.get_protocol(self.xml_file_path)
if (host['capabilities']['storage_protocol'] != protocol
and self._get_original_status(volume) == 'in-use'):
return False
pool_name = host['capabilities']['pool_name']
if len(pool_name) == 0:
return False
return True
def _migrate_volume(self, volume, host, new_type=None):
if not self._check_migration_valid(host, volume):
return (False, None)
type_id = volume['volume_type_id']
volume_type = None
if type_id:
volume_type = volume_types.get_volume_type(None, type_id)
pool_name = host['capabilities']['pool_name']
pools = self.restclient.find_all_pools()
pool_info = self.restclient.find_pool_info(pool_name, pools)
src_volume_name = huawei_utils.encode_name(volume['id'])
dst_volume_name = six.text_type(hash(src_volume_name))
src_id = volume.get('provider_location')
src_lun_params = self.restclient.get_lun_info(src_id)
opts = None
qos = None
if new_type:
# If new type exists, use new type.
opts = huawei_utils._get_extra_spec_value(
new_type['extra_specs'])
opts = smartx.SmartX().get_smartx_specs_opts(opts)
if 'LUNType' not in opts:
opts['LUNType'] = huawei_utils.find_luntype_in_xml(
self.xml_file_path)
qos = huawei_utils.get_qos_by_volume_type(new_type)
elif volume_type:
qos = huawei_utils.get_qos_by_volume_type(volume_type)
if not opts:
opts = huawei_utils.get_volume_params(volume)
opts = smartx.SmartX().get_smartx_specs_opts(opts)
lun_info = self._create_lun_with_extra_feature(pool_info,
dst_volume_name,
src_lun_params,
opts)
lun_id = lun_info['ID']
if qos:
LOG.info(_LI('QoS: %s.'), qos)
SmartQos = smartx.SmartQos(self.restclient)
SmartQos.create_qos(qos, lun_id)
if opts:
smartpartition = smartx.SmartPartition(self.restclient)
smartpartition.add(opts, lun_id)
smartcache = smartx.SmartCache(self.restclient)
smartcache.add(opts, lun_id)
dst_id = lun_info['ID']
self._wait_volume_ready(dst_id)
moved = self._migrate_lun(src_id, dst_id)
return moved, {}
def _create_lun_with_extra_feature(self, pool_info,
lun_name,
lun_params,
spec_opts):
LOG.info(_LI('Create a new lun %s for migration.'), lun_name)
# Prepare lun parameters.
lunparam = {"TYPE": '11',
"NAME": lun_name,
"PARENTTYPE": '216',
"PARENTID": pool_info['ID'],
"ALLOCTYPE": lun_params['ALLOCTYPE'],
"CAPACITY": lun_params['CAPACITY'],
"WRITEPOLICY": lun_params['WRITEPOLICY'],
"MIRRORPOLICY": lun_params['MIRRORPOLICY'],
"PREFETCHPOLICY": lun_params['PREFETCHPOLICY'],
"PREFETCHVALUE": lun_params['PREFETCHVALUE'],
"DATATRANSFERPOLICY": '0',
"READCACHEPOLICY": lun_params['READCACHEPOLICY'],
"WRITECACHEPOLICY": lun_params['WRITECACHEPOLICY'],
"OWNINGCONTROLLER": lun_params['OWNINGCONTROLLER'],
}
if 'LUNType' in spec_opts:
lunparam['ALLOCTYPE'] = spec_opts['LUNType']
if spec_opts['policy']:
lunparam['DATATRANSFERPOLICY'] = spec_opts['policy']
lun_info = self.restclient.create_volume(lunparam)
return lun_info
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
We use LUNcopy to copy a new volume from snapshot.
The time needed increases as volume size does.
"""
snapshotname = huawei_utils.encode_name(snapshot['id'])
snapshot_id = snapshot.get('provider_location')
if snapshot_id is None:
snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
if snapshot_id is None:
err_msg = (_(
'create_volume_from_snapshot: Snapshot %(name)s '
'does not exist.')
% {'name': snapshotname})
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
lun_info = self.create_volume(volume)
tgt_lun_id = lun_info['ID']
luncopy_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, '
'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.'),
{'src_lun_id': snapshot_id,
'tgt_lun_id': tgt_lun_id,
'copy_name': luncopy_name})
event_type = 'LUNReadyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
def _volume_ready():
result = self.restclient.get_lun_info(tgt_lun_id)
if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH
and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY):
return True
return False
huawei_utils.wait_for_condition(self.xml_file_path,
_volume_ready,
wait_interval,
wait_interval * 10)
self._copy_volume(volume, luncopy_name,
snapshot_id, tgt_lun_id)
return {'ID': lun_info['ID'],
'lun_info': lun_info}
def create_cloned_volume(self, volume, src_vref):
"""Clone a new volume from an existing volume."""
# Form the snapshot structure.
snapshot = {'id': uuid.uuid4().__str__(),
'volume_id': src_vref['id'],
'volume': src_vref}
# Create snapshot.
self.create_snapshot(snapshot)
try:
# Create volume from snapshot.
lun_info = self.create_volume_from_snapshot(volume, snapshot)
finally:
try:
# Delete snapshot.
self.delete_snapshot(snapshot)
except exception.VolumeBackendAPIException:
LOG.warning(_LW(
'Failure deleting the snapshot %(snapshot_id)s '
'of volume %(volume_id)s.'),
{'snapshot_id': snapshot['id'],
'volume_id': src_vref['id']},)
return {'provider_location': lun_info['ID'],
'lun_info': lun_info}
@utils.synchronized('huawei', external=True)
def extend_volume(self, volume, new_size):
"""Extend a volume."""
volume_size = huawei_utils.get_volume_size(volume)
new_volume_size = int(new_size) * units.Gi / 512
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'Extend volume: %(volumename)s, oldsize:'
' %(oldsize)s newsize: %(newsize)s.'),
{'volumename': volume_name,
'oldsize': volume_size,
'newsize': new_volume_size},)
lun_id = self.restclient.get_lunid(volume, volume_name)
luninfo = self.restclient.extend_volume(lun_id, new_volume_size)
return {'provider_location': luninfo['ID'],
'lun_info': luninfo}
@utils.synchronized('huawei', external=True)
def create_snapshot(self, snapshot):
snapshot_info = self.restclient.create_snapshot(snapshot)
snapshot_id = snapshot_info['ID']
self.restclient.activate_snapshot(snapshot_id)
return {'provider_location': snapshot_info['ID'],
'lun_info': snapshot_info}
@utils.synchronized('huawei', external=True)
def delete_snapshot(self, snapshot):
snapshotname = huawei_utils.encode_name(snapshot['id'])
volume_name = huawei_utils.encode_name(snapshot['volume_id'])
LOG.info(_LI(
'stop_snapshot: snapshot name: %(snapshot)s, '
'volume name: %(volume)s.'),
{'snapshot': snapshotname,
'volume': volume_name},)
snapshot_id = snapshot.get('provider_location')
if snapshot_id is None:
snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)
if snapshot_id is not None:
if self.restclient.check_snapshot_exist(snapshot_id):
self.restclient.stop_snapshot(snapshot_id)
self.restclient.delete_snapshot(snapshot_id)
else:
LOG.warning(_LW("Can't find snapshot on the array."))
else:
LOG.warning(_LW("Can't find snapshot on the array."))
return False
return True
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, "
"diff=%(diff)s, host=%(host)s.", {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
# Check what changes are needed
migration, change_opts, lun_id = self.determine_changes_when_retype(
volume, new_type, host)
try:
if migration:
LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with "
"change %(change_opts)s.",
{"lun_id": lun_id, "change_opts": change_opts})
if self._migrate_volume(volume, host, new_type):
return True
else:
LOG.warning(_LW("Storage-assisted migration failed during "
"retype."))
return False
else:
# Modify lun to change policy
self.modify_lun(lun_id, change_opts)
return True
except exception.VolumeBackendAPIException:
LOG.exception(_LE('Retype volume error.'))
return False
def modify_lun(self, lun_id, change_opts):
if change_opts.get('partitionid'):
old, new = change_opts['partitionid']
old_id = old[0]
old_name = old[1]
new_id = new[0]
new_name = new[1]
if old_id:
self.restclient.remove_lun_from_partition(lun_id, old_id)
if new_id:
self.restclient.add_lun_to_partition(lun_id, new_id)
LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartpartition from "
"(name: %(old_name)s, id: %(old_id)s) to "
"(name: %(new_name)s, id: %(new_id)s) success."),
{"lun_id": lun_id,
"old_id": old_id, "old_name": old_name,
"new_id": new_id, "new_name": new_name})
if change_opts.get('cacheid'):
old, new = change_opts['cacheid']
old_id = old[0]
old_name = old[1]
new_id = new[0]
new_name = new[1]
if old_id:
self.restclient.remove_lun_from_cache(lun_id, old_id)
if new_id:
self.restclient.add_lun_to_cache(lun_id, new_id)
LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartcache from "
"(name: %(old_name)s, id: %(old_id)s) to "
"(name: %(new_name)s, id: %(new_id)s) successfully."),
{'lun_id': lun_id,
'old_id': old_id, "old_name": old_name,
'new_id': new_id, "new_name": new_name})
if change_opts.get('policy'):
old_policy, new_policy = change_opts['policy']
self.restclient.change_lun_smarttier(lun_id, new_policy)
LOG.info(_LI("Retype LUN(id: %(lun_id)s) smarttier policy from "
"%(old_policy)s to %(new_policy)s success."),
{'lun_id': lun_id,
'old_policy': old_policy,
'new_policy': new_policy})
if change_opts.get('qos'):
old_qos, new_qos = change_opts['qos']
old_qos_id = old_qos[0]
old_qos_value = old_qos[1]
if old_qos_id:
self.remove_qos_lun(lun_id, old_qos_id)
if new_qos:
smart_qos = smartx.SmartQos(self.restclient)
smart_qos.create_qos(new_qos, lun_id)
LOG.info(_LI("Retype LUN(id: %(lun_id)s) smartqos from "
"%(old_qos_value)s to %(new_qos)s success."),
{'lun_id': lun_id,
'old_qos_value': old_qos_value,
'new_qos': new_qos})
def get_lun_specs(self, lun_id):
lun_opts = {
'policy': None,
'partitionid': None,
'cacheid': None,
'LUNType': None,
}
lun_info = self.restclient.get_lun_info(lun_id)
lun_opts['LUNType'] = int(lun_info['ALLOCTYPE'])
if lun_info['DATATRANSFERPOLICY']:
lun_opts['policy'] = lun_info['DATATRANSFERPOLICY']
if lun_info['SMARTCACHEPARTITIONID']:
lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID']
if lun_info['CACHEPARTITIONID']:
lun_opts['partitionid'] = lun_info['CACHEPARTITIONID']
return lun_opts
def determine_changes_when_retype(self, volume, new_type, host):
migration = False
change_opts = {
'policy': None,
'partitionid': None,
'cacheid': None,
'qos': None,
'host': None,
'LUNType': None,
}
lun_id = volume.get('provider_location')
old_opts = self.get_lun_specs(lun_id)
new_specs = new_type['extra_specs']
new_opts = huawei_utils._get_extra_spec_value(new_specs)
new_opts = smartx.SmartX().get_smartx_specs_opts(new_opts)
if 'LUNType' not in new_opts:
new_opts['LUNType'] = huawei_utils.find_luntype_in_xml(
self.xml_file_path)
if volume['host'] != host['host']:
migration = True
change_opts['host'] = (volume['host'], host['host'])
if old_opts['LUNType'] != new_opts['LUNType']:
migration = True
change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType'])
new_cache_id = None
new_cache_name = new_opts['cachename']
if new_cache_name:
new_cache_id = self.restclient.get_cache_id_by_name(new_cache_name)
if new_cache_id is None:
msg = (_(
"Can't find cache name on the array, cache name is: "
"%(name)s.") % {'name': new_cache_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
new_partition_id = None
new_partition_name = new_opts['partitionname']
if new_partition_name:
new_partition_id = self.restclient.get_partition_id_by_name(
new_partition_name)
if new_partition_id is None:
msg = (_(
"Can't find partition name on the array, partition name "
"is: %(name)s.") % {'name': new_partition_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# smarttier
if old_opts['policy'] != new_opts['policy']:
change_opts['policy'] = (old_opts['policy'], new_opts['policy'])
# smartcache
old_cache_id = old_opts['cacheid']
if old_cache_id != new_cache_id:
old_cache_name = None
if old_cache_id:
cache_info = self.restclient.get_cache_info_by_id(old_cache_id)
old_cache_name = cache_info['NAME']
change_opts['cacheid'] = ([old_cache_id, old_cache_name],
[new_cache_id, new_cache_name])
# smartpartition
old_partition_id = old_opts['partitionid']
if old_partition_id != new_partition_id:
old_partition_name = None
if old_partition_id:
partition_info = self.restclient.get_partition_info_by_id(
old_partition_id)
old_partition_name = partition_info['NAME']
change_opts['partitionid'] = ([old_partition_id,
old_partition_name],
[new_partition_id,
new_partition_name])
# smartqos
new_qos = huawei_utils.get_qos_by_volume_type(new_type)
old_qos_id = self.restclient.get_qosid_by_lunid(lun_id)
old_qos = self._get_qos_specs_from_array(old_qos_id)
if old_qos != new_qos:
change_opts['qos'] = ([old_qos_id, old_qos], new_qos)
LOG.debug("Determine changes when retype. Migration: "
"%(migration)s, change_opts: %(change_opts)s.",
{'migration': migration, 'change_opts': change_opts})
return migration, change_opts, lun_id
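    # Illustrative example (hypothetical values, not produced verbatim by the
    # driver): determine_changes_when_retype() returning migration=False with
    # change_opts = {'policy': ('1', '2'), 'partitionid': None, 'cacheid': None,
    #                'qos': None, 'host': None, 'LUNType': None}
    # would mean only the smarttier policy differs, which retype() then applies
    # in place through modify_lun() instead of migrating the LUN.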
def _get_qos_specs_from_array(self, qos_id):
qos = {}
qos_info = {}
if qos_id:
qos_info = self.restclient.get_qos_info(qos_id)
for key, value in qos_info.items():
if key.upper() in constants.QOS_KEYS:
if key.upper() == 'LATENCY' and value == '0':
continue
else:
qos[key.upper()] = value
return qos
def create_export(self, context, volume, connector):
"""Export a volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
def _copy_volume(self, volume, copy_name, src_lun, tgt_lun):
luncopy_id = self.restclient.create_luncopy(copy_name,
src_lun, tgt_lun)
event_type = 'LUNcopyWaitInterval'
wait_interval = huawei_utils.get_wait_interval(self.xml_file_path,
event_type)
try:
self.restclient.start_luncopy(luncopy_id)
def _luncopy_complete():
luncopy_info = self.restclient.get_luncopy_info(luncopy_id)
if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY:
                    # luncopy_info['status'] indicates the running status of
                    # the luncopy. If luncopy_info['status'] is equal to '40',
                    # the luncopy is completely ready.
return True
elif luncopy_info['state'] != constants.STATUS_HEALTH:
                    # luncopy_info['state'] indicates the health status of the
                    # luncopy. If luncopy_info['state'] is not equal to '1',
                    # an error occurred during the LUNcopy operation and it
                    # should be aborted.
err_msg = (_(
'An error occurred during the LUNcopy operation. '
'LUNcopy name: %(luncopyname)s. '
'LUNcopy status: %(luncopystatus)s. '
'LUNcopy state: %(luncopystate)s.')
% {'luncopyname': luncopy_id,
'luncopystatus': luncopy_info['status'],
'luncopystate': luncopy_info['state']},)
LOG.error(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
huawei_utils.wait_for_condition(self.xml_file_path,
_luncopy_complete,
wait_interval)
except Exception:
with excutils.save_and_reraise_exception():
self.restclient.delete_luncopy(luncopy_id)
self.delete_volume(volume)
self.restclient.delete_luncopy(luncopy_id)
class Huawei18000ISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
"""ISCSI driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
CHAP support
Multiple pools support
ISCSI multipath support
SmartX support
Volume migration support
Volume retype support
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000ISCSIDriver, self).__init__(*args, **kwargs)
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'iSCSI'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
@utils.synchronized('huawei', external=True)
def initialize_connection(self, volume, connector):
"""Map a volume to a host and return target iSCSI information."""
LOG.info(_LI('Enter initialize_connection.'))
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initiator name: %(initiator_name)s, '
'volume name: %(volume)s.'),
{'initiator_name': initiator_name,
'volume': volume_name})
(iscsi_iqns,
target_ips,
portgroup_id) = self.restclient.get_iscsi_params(self.xml_file_path,
connector)
LOG.info(_LI('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, '
'target_ip: %(target_ip)s, '
'portgroup_id: %(portgroup_id)s.'),
{'iscsi_iqn': iscsi_iqns,
'target_ip': target_ips,
'portgroup_id': portgroup_id},)
        # Create the hostgroup if it does not exist.
host_name = connector['host']
host_name_before_hash = None
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
host_id = self.restclient.add_host_with_check(host_name,
host_name_before_hash)
# Add initiator to the host.
self.restclient.ensure_initiator_added(self.xml_file_path,
initiator_name,
host_id)
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
lun_id = self.restclient.get_lunid(volume, volume_name)
# Mapping lungroup and hostgroup to view.
self.restclient.do_mapping(lun_id, hostgroup_id,
host_id, portgroup_id)
hostlun_id = self.restclient.find_host_lun_id(host_id, lun_id)
LOG.info(_LI("initialize_connection, host lun id is: %s."),
hostlun_id)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
chapinfo = self.restclient.find_chap_info(iscsi_conf,
initiator_name)
# Return iSCSI properties.
properties = {}
properties['target_discovered'] = False
properties['volume_id'] = volume['id']
multipath = connector.get('multipath', False)
hostlun_id = int(hostlun_id)
if not multipath:
properties['target_portal'] = ('%s:3260' % target_ips[0])
properties['target_iqn'] = iscsi_iqns[0]
properties['target_lun'] = hostlun_id
else:
properties['target_iqns'] = [iqn for iqn in iscsi_iqns]
properties['target_portals'] = [
'%s:3260' % ip for ip in target_ips]
properties['target_luns'] = [hostlun_id] * len(target_ips)
        # If CHAP is configured, return the CHAP info.
if chapinfo:
chap_username, chap_password = chapinfo.split(';')
properties['auth_method'] = 'CHAP'
properties['auth_username'] = chap_username
properties['auth_password'] = chap_password
LOG.info(_LI("initialize_connection success. Return data: %s."),
properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
@utils.synchronized('huawei', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
initiator_name = connector['initiator']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location')
host_name = connector['host']
lungroup_id = None
LOG.info(_LI(
'terminate_connection: volume name: %(volume)s, '
'initiator name: %(ini)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'ini': initiator_name,
'lunid': lun_id},)
iscsi_conf = huawei_utils.get_iscsi_conf(self.xml_file_path)
portgroup = None
portgroup_id = None
view_id = None
left_lunnum = -1
for ini in iscsi_conf['Initiator']:
if ini['Name'] == initiator_name:
for key in ini:
if key == 'TargetPortGroup':
portgroup = ini['TargetPortGroup']
break
if portgroup:
portgroup_id = self.restclient.find_tgt_port_group(portgroup)
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name = six.text_type(hash(host_name))
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
# Remove lun from lungroup.
if lun_id and self.restclient.check_lun_exist(lun_id):
if lungroup_id:
lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
if lungroup_id in lungroup_ids:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Lun is not in lungroup. "
"Lun id: %(lun_id)s. "
"lungroup id: %(lungroup_id)s."),
{"lun_id": lun_id,
"lungroup_id": lungroup_id})
else:
LOG.warning(_LW("Can't find lun on the array."))
# Remove portgroup from mapping view if no lun left in lungroup.
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if portgroup_id and view_id and (int(left_lunnum) <= 0):
if self.restclient.is_portgroup_associated_to_view(view_id,
portgroup_id):
self.restclient.delete_portgroup_mapping_view(view_id,
portgroup_id)
if view_id and (int(left_lunnum) <= 0):
self.restclient.remove_chap(initiator_name)
if self.restclient.lungroup_associated(view_id, lungroup_id):
self.restclient.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.restclient.delete_lungroup(lungroup_id)
if self.restclient.is_initiator_associated_to_host(initiator_name):
self.restclient.remove_iscsi_from_host(initiator_name)
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
if hostgroup_id:
if self.restclient.hostgroup_associated(view_id, hostgroup_id):
self.restclient.delete_hostgoup_mapping_view(view_id,
hostgroup_id)
self.restclient.remove_host_from_hostgroup(hostgroup_id,
host_id)
self.restclient.delete_hostgroup(hostgroup_id)
self.restclient.remove_host(host_id)
self.restclient.delete_mapping_view(view_id)
class Huawei18000FCDriver(HuaweiBaseDriver, driver.FibreChannelDriver):
"""FC driver for Huawei OceanStor 18000 storage arrays.
Version history:
1.0.0 - Initial driver
1.1.0 - Provide Huawei OceanStor 18000 storage volume driver
1.1.1 - Code refactor
Multiple pools support
SmartX support
Volume migration support
Volume retype support
FC zone enhancement
"""
VERSION = "1.1.1"
def __init__(self, *args, **kwargs):
super(Huawei18000FCDriver, self).__init__(*args, **kwargs)
self.fcsan_lookup_service = None
def get_volume_stats(self, refresh=False):
"""Get volume status."""
data = HuaweiBaseDriver.get_volume_stats(self, refresh=False)
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['storage_protocol'] = 'FC'
data['driver_version'] = self.VERSION
data['vendor_name'] = 'Huawei'
return data
@utils.synchronized('huawei', external=True)
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
LOG.info(_LI(
'initialize_connection, initiator: %(wwpns)s,'
' volume name: %(volume)s.'),
{'wwpns': wwns,
'volume': volume_name},)
lun_id = self.restclient.get_lunid(volume, volume_name)
host_name_before_hash = None
host_name = connector['host']
if host_name and (len(host_name) > constants.MAX_HOSTNAME_LENGTH):
host_name_before_hash = host_name
host_name = six.text_type(hash(host_name))
if not self.fcsan_lookup_service:
self.fcsan_lookup_service = fczm_utils.create_lookup_service()
if self.fcsan_lookup_service:
# Use FC switch.
host_id = self.restclient.add_host_with_check(
host_name, host_name_before_hash)
zone_helper = fc_zone_helper.FCZoneHelper(
self.fcsan_lookup_service, self.restclient)
(tgt_port_wwns, init_targ_map) = (
zone_helper.build_ini_targ_map(wwns))
for ini in init_targ_map:
self.restclient.ensure_fc_initiator_added(ini, host_id)
else:
            # No FC switch is used.
host_id = self.restclient.add_host_with_check(
host_name, host_name_before_hash)
online_wwns_in_host = (
self.restclient.get_host_online_fc_initiators(host_id))
online_free_wwns = self.restclient.get_online_free_wwns()
for wwn in wwns:
if (wwn not in online_wwns_in_host
and wwn not in online_free_wwns):
wwns_in_host = (
self.restclient.get_host_fc_initiators(host_id))
iqns_in_host = (
self.restclient.get_host_iscsi_initiators(host_id))
if not wwns_in_host and not iqns_in_host:
self.restclient.remove_host(host_id)
msg = (_('Can not add FC initiator to host.'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
for wwn in wwns:
if wwn in online_free_wwns:
self.restclient.add_fc_port_to_host(host_id, wwn)
(tgt_port_wwns, init_targ_map) = (
self.restclient.get_init_targ_map(wwns))
# Add host into hostgroup.
hostgroup_id = self.restclient.add_host_into_hostgroup(host_id)
self.restclient.do_mapping(lun_id, hostgroup_id, host_id)
host_lun_id = self.restclient.find_host_lun_id(host_id, lun_id)
# Return FC properties.
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': int(host_lun_id),
'target_discovered': True,
'target_wwn': tgt_port_wwns,
'volume_id': volume['id'],
'initiator_target_map': init_targ_map}, }
LOG.info(_LI("initialize_connection, return data is: %s."),
info)
return info
@utils.synchronized('huawei', external=True)
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Delete map between a volume and a host."""
wwns = connector['wwpns']
volume_name = huawei_utils.encode_name(volume['id'])
lun_id = volume.get('provider_location')
host_name = connector['host']
left_lunnum = -1
lungroup_id = None
view_id = None
LOG.info(_LI('terminate_connection: volume name: %(volume)s, '
'wwpns: %(wwns)s, '
'lun_id: %(lunid)s.'),
{'volume': volume_name,
'wwns': wwns,
'lunid': lun_id},)
if host_name and len(host_name) > constants.MAX_HOSTNAME_LENGTH:
host_name = six.text_type(hash(host_name))
host_id = self.restclient.find_host(host_name)
if host_id:
mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id
view_id = self.restclient.find_mapping_view(mapping_view_name)
if view_id:
lungroup_id = self.restclient.find_lungroup_from_map(view_id)
if lun_id and self.restclient.check_lun_exist(lun_id):
if lungroup_id:
lungroup_ids = self.restclient.get_lungroupids_by_lunid(lun_id)
if lungroup_id in lungroup_ids:
self.restclient.remove_lun_from_lungroup(lungroup_id,
lun_id)
else:
LOG.warning(_LW("Lun is not in lungroup. "
"Lun id: %(lun_id)s. "
"Lungroup id: %(lungroup_id)s."),
{"lun_id": lun_id,
"lungroup_id": lungroup_id})
else:
LOG.warning(_LW("Can't find lun on the array."))
if lungroup_id:
left_lunnum = self.restclient.get_lunnum_from_lungroup(lungroup_id)
if int(left_lunnum) > 0:
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
else:
if not self.fcsan_lookup_service:
self.fcsan_lookup_service = fczm_utils.create_lookup_service()
if self.fcsan_lookup_service:
zone_helper = fc_zone_helper.FCZoneHelper(
self.fcsan_lookup_service, self.restclient)
(tgt_port_wwns, init_targ_map) = (
zone_helper.build_ini_targ_map(wwns))
else:
(tgt_port_wwns, init_targ_map) = (
self.restclient.get_init_targ_map(wwns))
for wwn in wwns:
if self.restclient.is_fc_initiator_associated_to_host(wwn):
self.restclient.remove_fc_from_host(wwn)
if lungroup_id:
if view_id and self.restclient.lungroup_associated(
view_id, lungroup_id):
self.restclient.delete_lungroup_mapping_view(view_id,
lungroup_id)
self.restclient.delete_lungroup(lungroup_id)
if host_id:
hostgroup_name = constants.HOSTGROUP_PREFIX + host_id
hostgroup_id = self.restclient.find_hostgroup(hostgroup_name)
if hostgroup_id:
if view_id and self.restclient.hostgroup_associated(
view_id, hostgroup_id):
self.restclient.delete_hostgoup_mapping_view(
view_id, hostgroup_id)
self.restclient.remove_host_from_hostgroup(
hostgroup_id, host_id)
self.restclient.delete_hostgroup(hostgroup_id)
if not self.restclient.check_fc_initiators_exist_in_host(
host_id):
self.restclient.remove_host(host_id)
if view_id:
self.restclient.delete_mapping_view(view_id)
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_wwn': tgt_port_wwns,
'initiator_target_map': init_targ_map}}
LOG.info(_LI("terminate_connection, return data is: %s."),
info)
return info
|
Paul-Ezell/cinder-1
|
cinder/volume/drivers/huawei/huawei_driver.py
|
Python
|
apache-2.0
| 49,899 |
# version code 988
# Please fill out this stencil and submit using the provided submission script.
from GF2 import one
from matutil import *
from vecutil import zero_vec
## Problem 1
# Write each matrix as a list of row lists
echelon_form_1 = [[1,2,0,2,0],
[0,1,0,3,4],
[0,0,2,3,4],
[0,0,0,2,0],
[0,0,0,0,4]]
echelon_form_2 = [[0,4,3,4,4],
[0,0,4,2,0],
[0,0,0,0,1],
[0,0,0,0,0]]
echelon_form_3 = [[1, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 0]]
echelon_form_4 = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
def find_first_non_zero (row):
length = len(row)
curr = 0
while curr < length and row[curr] == 0:
curr += 1
if curr < length: return curr
else: return -1
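# Quick illustration (not required by the stencil): find_first_non_zero([0, 0, 5, 1])
# returns 2, while an all-zero row such as [0, 0, 0] returns -1.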
## Problem 2
def is_echelon(A):
'''
Input:
- A: a list of row lists
Output:
- True if A is in echelon form
- False otherwise
Examples:
>>> is_echelon([[1,1,1],[0,1,1],[0,0,1]])
True
>>> is_echelon([[0,1,1],[0,1,0],[0,0,1]])
False
'''
non_zeros = [find_first_non_zero(A[i]) for i in range(len(A))]
#print(non_zeros)
len_nzs = len(non_zeros)
is_null = True
for i in range(len_nzs):
if non_zeros[i] != -1:
is_null = False
break
if is_null == True: return True
for i in range(1, len_nzs):
        # If the previous row is all zeros, the current one cannot have a non-zero entry
if non_zeros[i-1] == -1 and non_zeros[i] != -1: return False
# The current non-zero element should be to the right of the previous row non-zero element
if non_zeros[i] <= non_zeros[i-1]: return False
return True
## Problem 3
# Give each answer as a list
echelon_form_vec_a = [1, 0, 3, 0]
echelon_form_vec_b = [-3, 0, -2, 3]
echelon_form_vec_c = [-5, 0, 2, 0, 2]
## Problem 4
# If a solution exists, give it as a list vector.
# If no solution exists, provide "None".
solving_with_echelon_form_a = None
solving_with_echelon_form_b = [21, 0, 2, 0, 0]
## Problem 5
def echelon_solve(rowlist, label_list, b):
'''
Input:
- rowlist: a list of Vecs
- label_list: a list of labels establishing an order on the domain of
Vecs in rowlist
- b: a vector (represented as a list)
Output:
- Vec x such that rowlist * x is b
>>> D = {'A','B','C','D','E'}
>>> U_rows = [Vec(D, {'A':one, 'E':one}), Vec(D, {'B':one, 'E':one}), Vec(D,{'C':one})]
    >>> b_list = [one,0,one]
    >>> cols = ['A', 'B', 'C', 'D', 'E']
>>> echelon_solve(U_rows, cols, b_list)
Vec({'B', 'C', 'A', 'D', 'E'},{'B': 0, 'C': one, 'A': one})
'''
D = rowlist[0].D
x = zero_vec(D)
num_labels = len(label_list)
for j in reversed(range(len(D))):
if j > len(rowlist)-1: continue
row = rowlist[j]
if row == zero_vec(D): continue
# in the row find the label of the column with the first non-zero entry
for i in range(num_labels):
if row[label_list[i]] == one: break
c = label_list[i]
x[c] = (b[j] - x*row)/row[c]
return x
## Problem 6
D = {'A','B','C','D'}
rowlist = [Vec(D, {'A':one, 'B':one, 'D':one}), Vec(D, {'B':one}), Vec(D,{'C':one}), Vec(D,{'D':one})] # Provide as a list of Vec instances
label_list = ['A', 'B', 'C', 'D'] # Provide as a list
b = [one, one, 0, 0] # Provide as a list
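# Illustrative check (not part of the required answers): the system above could be
# solved with the Problem 5 routine, e.g. x = echelon_solve(rowlist, label_list, b),
# which back-substitutes over GF(2) using the rows defined above.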
## Problem 7
null_space_rows_a = {3, 4} # Put the row numbers of M from the PDF
## Problem 8
null_space_rows_b = {4}
## Problem 9
# Write each vector as a list
closest_vector_1 = [1.6, 3.2]
closest_vector_2 = [0, 1, 0]
closest_vector_3 = [3, 2, 1, -4]
## Problem 10
# Write each vector as a list
# sigma = <b, a>/<a, a>
# project b onto a = sigma * a
# b orthogonal to a = b - (b project onto a)
project_onto_1 = [2, 0]
projection_orthogonal_1 = [0, 1]
project_onto_2 = [-1/6, -1/3, 1/6]
projection_orthogonal_2 = [7/6, 4/3, 23/6]
project_onto_3 = [1, 1, 4]
projection_orthogonal_3 = [0, 0, 0]
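# Worked sketch of the formulas above with hypothetical vectors chosen to match the
# first answer: for b = [2, 1] and a = [1, 0], sigma = <b, a>/<a, a> = 2/1 = 2, so the
# projection is sigma * a = [2, 0] and the orthogonal component is b - [2, 0] = [0, 1].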
## Problem 11
# Norm = sqrt (sum of the squares of the components of the vector)
norm1 = 3
norm2 = 4
norm3 = 1
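# For example (hypothetical vector): [2, 2, 1] has norm sqrt(4 + 4 + 1) = 3, the same
# form as norm1 above.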
|
tri2sing/LinearAlgebraPython
|
hw6.py
|
Python
|
apache-2.0
| 4,332 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.setpoint_managers import SetpointManagerMultiZoneCoolingAverage
log = logging.getLogger(__name__)
class TestSetpointManagerMultiZoneCoolingAverage(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_setpointmanagermultizonecoolingaverage(self):
pyidf.validation_level = ValidationLevel.error
obj = SetpointManagerMultiZoneCoolingAverage()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_hvac_air_loop_name = "object-list|HVAC Air Loop Name"
obj.hvac_air_loop_name = var_hvac_air_loop_name
# real
var_minimum_setpoint_temperature = 0.0001
obj.minimum_setpoint_temperature = var_minimum_setpoint_temperature
# real
var_maximum_setpoint_temperature = 0.0001
obj.maximum_setpoint_temperature = var_maximum_setpoint_temperature
# node
var_setpoint_node_or_nodelist_name = "node|Setpoint Node or NodeList Name"
obj.setpoint_node_or_nodelist_name = var_setpoint_node_or_nodelist_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].name, var_name)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].hvac_air_loop_name, var_hvac_air_loop_name)
self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].minimum_setpoint_temperature, var_minimum_setpoint_temperature)
self.assertAlmostEqual(idf2.setpointmanagermultizonecoolingaverages[0].maximum_setpoint_temperature, var_maximum_setpoint_temperature)
self.assertEqual(idf2.setpointmanagermultizonecoolingaverages[0].setpoint_node_or_nodelist_name, var_setpoint_node_or_nodelist_name)
|
rbuffat/pyidf
|
tests/test_setpointmanagermultizonecoolingaverage.py
|
Python
|
apache-2.0
| 2,146 |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
"""Defines a Revision model for storing snapshots."""
from ggrc import db
from ggrc.models.computed_property import computed_property
from ggrc.models.mixins import Base
from ggrc.models.types import JsonType
class Revision(Base, db.Model):
"""Revision object holds a JSON snapshot of the object at a time."""
__tablename__ = 'revisions'
resource_id = db.Column(db.Integer, nullable=False)
resource_type = db.Column(db.String, nullable=False)
event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False)
action = db.Column(db.Enum(u'created', u'modified', u'deleted'),
nullable=False)
content = db.Column(JsonType, nullable=False)
source_type = db.Column(db.String, nullable=True)
source_id = db.Column(db.Integer, nullable=True)
destination_type = db.Column(db.String, nullable=True)
destination_id = db.Column(db.Integer, nullable=True)
@staticmethod
def _extra_table_args(_):
return (db.Index('revisions_modified_by', 'modified_by_id'),)
_publish_attrs = [
'resource_id',
'resource_type',
'source_type',
'source_id',
'destination_type',
'destination_id',
'action',
'content',
'description',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Revision, cls).eager_query()
return query.options(
orm.subqueryload('modified_by'),
orm.subqueryload('event'), # used in description
)
def __init__(self, obj, modified_by_id, action, content):
self.resource_id = obj.id
self.modified_by_id = modified_by_id
self.resource_type = str(obj.__class__.__name__)
self.action = action
self.content = content
for attr in ["source_type",
"source_id",
"destination_type",
"destination_id"]:
setattr(self, attr, getattr(obj, attr, None))
def _description_mapping(self, link_objects):
"""Compute description for revisions with <-> in display name."""
display_name = self.content['display_name']
source, destination = display_name.split('<->')[:2]
mapping_verb = "linked" if self.resource_type in link_objects else "mapped"
if self.action == 'created':
result = u"{1} {2} to {0}".format(source, destination, mapping_verb)
elif self.action == 'deleted':
result = u"{1} un{2} from {0}".format(source, destination, mapping_verb)
else:
result = u"{0} {1}".format(display_name, self.action)
return result
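  # For instance (illustrative): a 'created' revision whose display_name is "A<->B"
  # and whose resource_type is not a link object yields the description
  # "B mapped to A".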
@computed_property
def description(self):
"""Compute a human readable description from action and content."""
link_objects = ['ObjectDocument']
if 'display_name' not in self.content:
return ''
display_name = self.content['display_name']
if not display_name:
result = u"{0} {1}".format(self.resource_type, self.action)
elif u'<->' in display_name:
result = self._description_mapping(link_objects)
else:
if 'mapped_directive' in self.content:
# then this is a special case of combined map/creation
# should happen only for Section and Control
mapped_directive = self.content['mapped_directive']
if self.action == 'created':
result = u"New {0}, {1}, created and mapped to {2}".format(
self.resource_type,
display_name,
mapped_directive
)
elif self.action == 'deleted':
result = u"{0} unmapped from {1} and deleted".format(
display_name, mapped_directive)
else:
result = u"{0} {1}".format(display_name, self.action)
else:
# otherwise, it's a normal creation event
result = u"{0} {1}".format(display_name, self.action)
if self.event.action == "IMPORT":
result += ", via spreadsheet import"
return result
|
prasannav7/ggrc-core
|
src/ggrc/models/revision.py
|
Python
|
apache-2.0
| 4,093 |
import calendar
import datetime
import os
import time
from ajenti.api import *
from ajenti.ui import p, UIElement, on
@p('value', default='', bindtypes=[str, unicode, int, long])
@p('readonly', type=bool, default=False)
@p('type', default='text')
@plugin
class TextBox (UIElement):
typeid = 'textbox'
@p('value', default='', bindtypes=[str, unicode, int, long])
@plugin
class PasswordBox (UIElement):
typeid = 'passwordbox'
@p('value', default='', type=int, bindtypes=[str, unicode, int, long])
@plugin
class DateTime (UIElement):
typeid = 'datetime'
@property
def dateobject(self):
if self.value:
            return datetime.datetime.fromtimestamp(self.value)
@dateobject.setter
def dateobject__set(self, value):
if value:
self.value = calendar.timegm(value.timetuple())
else:
self.value = None
@p('value', default='', bindtypes=[str, unicode])
@p('icon', default=None)
@p('placeholder', default=None)
@plugin
class Editable (UIElement):
typeid = 'editable'
@p('text', default='')
@p('value', default=False, bindtypes=[bool])
@plugin
class CheckBox (UIElement):
typeid = 'checkbox'
@p('labels', default=[], type=list)
@p('values', default=[], type=list, public=False)
@p('value', bindtypes=[object], public=False)
@p('index', default=0, type=int)
@p('server', default=False, type=bool)
@p('plain', default=False, type=bool)
@plugin
class Dropdown (UIElement):
typeid = 'dropdown'
def value_get(self):
if self.index < len(self.values):
try:
return self.values[self.index]
except TypeError:
return None
return None
def value_set(self, value):
if value in self.values:
self.index = self.values.index(value)
else:
self.index = 0
value = property(value_get, value_set)
@p('labels', default=[], type=list)
@p('values', default=[], type=list)
@p('separator', default=None, type=str)
@p('value', default='', bindtypes=[str, unicode])
@plugin
class Combobox (UIElement):
typeid = 'combobox'
@p('target', type=str)
@plugin
class FileUpload (UIElement):
typeid = 'fileupload'
@p('active', type=int)
@p('length', type=int)
@plugin
class Paging (UIElement):
typeid = 'paging'
@p('value', default='', bindtypes=[str, unicode])
@p('directory', default=False, type=bool)
@p('type', default='text')
@plugin
class Pathbox (UIElement):
typeid = 'pathbox'
def init(self, *args, **kwargs):
if self.directory:
self.dialog = self.ui.create('opendirdialog')
else:
self.dialog = self.ui.create('openfiledialog')
self.append(self.dialog)
self.dialog.id = 'dialog'
self.dialog.visible = False
def on_start(self):
self.find('dialog').navigate(os.path.split(self.value or '')[0] or '/')
self.find('dialog').visible = True
@on('dialog', 'select')
def on_select(self, path=None):
self.find('dialog').visible = False
if path:
self.value = path
|
lupyuen/RaspberryPiImage
|
usr/share/pyshared/ajenti/plugins/main/controls_inputs.py
|
Python
|
apache-2.0
| 3,085 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn.functional as F
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchmetrics import Accuracy
import pytorch_lightning as pl
from pytorch_lightning import LightningDataModule, LightningModule, seed_everything
from pytorch_lightning.callbacks import EarlyStopping
PATH_LEGACY = os.path.dirname(__file__)
class SklearnDataset(Dataset):
def __init__(self, x, y, x_type, y_type):
self.x = x
self.y = y
self._x_type = x_type
self._y_type = y_type
def __getitem__(self, idx):
return torch.tensor(self.x[idx], dtype=self._x_type), torch.tensor(self.y[idx], dtype=self._y_type)
def __len__(self):
return len(self.y)
class SklearnDataModule(LightningDataModule):
def __init__(self, sklearn_dataset, x_type, y_type, batch_size: int = 128):
super().__init__()
self.batch_size = batch_size
self._x, self._y = sklearn_dataset
self._split_data()
self._x_type = x_type
self._y_type = y_type
def _split_data(self):
self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
self._x, self._y, test_size=0.20, random_state=42
)
self.x_train, self.x_valid, self.y_train, self.y_valid = train_test_split(
self.x_train, self.y_train, test_size=0.40, random_state=42
)
def train_dataloader(self):
return DataLoader(
SklearnDataset(self.x_train, self.y_train, self._x_type, self._y_type),
shuffle=True,
batch_size=self.batch_size,
)
def val_dataloader(self):
return DataLoader(
SklearnDataset(self.x_valid, self.y_valid, self._x_type, self._y_type), batch_size=self.batch_size
)
def test_dataloader(self):
return DataLoader(
SklearnDataset(self.x_test, self.y_test, self._x_type, self._y_type), batch_size=self.batch_size
)
class ClassifDataModule(SklearnDataModule):
def __init__(self, num_features=24, length=6000, num_classes=3, batch_size=128):
data = make_classification(
n_samples=length,
n_features=num_features,
n_classes=num_classes,
n_clusters_per_class=2,
n_informative=int(num_features / num_classes),
random_state=42,
)
super().__init__(data, x_type=torch.float32, y_type=torch.long, batch_size=batch_size)
class ClassificationModel(LightningModule):
def __init__(self, num_features=24, num_classes=3, lr=0.01):
super().__init__()
self.save_hyperparameters()
self.lr = lr
for i in range(3):
setattr(self, f"layer_{i}", nn.Linear(num_features, num_features))
setattr(self, f"layer_{i}a", torch.nn.ReLU())
setattr(self, "layer_end", nn.Linear(num_features, num_classes))
self.train_acc = Accuracy()
self.valid_acc = Accuracy()
self.test_acc = Accuracy()
def forward(self, x):
x = self.layer_0(x)
x = self.layer_0a(x)
x = self.layer_1(x)
x = self.layer_1a(x)
x = self.layer_2(x)
x = self.layer_2a(x)
x = self.layer_end(x)
logits = F.softmax(x, dim=1)
return logits
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return [optimizer], []
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
self.log("train_loss", loss, prog_bar=True)
self.log("train_acc", self.train_acc(logits, y), prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("val_loss", F.cross_entropy(logits, y), prog_bar=False)
self.log("val_acc", self.valid_acc(logits, y), prog_bar=True)
def test_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("test_loss", F.cross_entropy(logits, y), prog_bar=False)
self.log("test_acc", self.test_acc(logits, y), prog_bar=True)
def main_train(dir_path, max_epochs: int = 20):
seed_everything(42)
stopping = EarlyStopping(monitor="val_acc", mode="max", min_delta=0.005)
trainer = pl.Trainer(
default_root_dir=dir_path,
gpus=int(torch.cuda.is_available()),
precision=(16 if torch.cuda.is_available() else 32),
checkpoint_callback=True,
callbacks=[stopping],
min_epochs=3,
max_epochs=max_epochs,
accumulate_grad_batches=2,
deterministic=True,
)
dm = ClassifDataModule()
model = ClassificationModel()
trainer.fit(model, datamodule=dm)
res = trainer.test(model, datamodule=dm)
assert res[0]["test_loss"] <= 0.7
assert res[0]["test_acc"] >= 0.85
assert trainer.current_epoch < (max_epochs - 1)
if __name__ == "__main__":
path_dir = os.path.join(PATH_LEGACY, "checkpoints", str(pl.__version__))
main_train(path_dir)
|
williamFalcon/pytorch-lightning
|
legacy/simple_classif_training.py
|
Python
|
apache-2.0
| 5,832 |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
from . import models
from .rest import RESTClientObject
from .rest import ApiException
import os
import re
import sys
import urllib
import json
import mimetypes
import random
import tempfile
import threading
from datetime import datetime
from datetime import date
# python 2 and python 3 compatibility library
from six import iteritems
try:
# for python3
from urllib.parse import quote
except ImportError:
# for python2
from urllib import quote
from .configuration import Configuration
class ApiClient(object):
"""
Generic API client for Swagger client library builds.
Swagger generic API client. This client handles the client-
server communication, and is invariant across implementations. Specifics of
the methods and models for each application are generated from the Swagger
templates.
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
:param host: The base path for the server to call.
:param header_name: a header to pass when making calls to the API.
:param header_value: a header value to pass when making calls to the API.
"""
def __init__(self, host=None, header_name=None, header_value=None, cookie=None):
"""
Constructor of the class.
"""
self.rest_client = RESTClientObject()
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
if host is None:
self.host = Configuration().host
else:
self.host = host
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Swagger-Codegen/1.0.0/python'
@property
def user_agent(self):
"""
Gets user agent.
"""
return self.default_headers['User-Agent']
@user_agent.setter
def user_agent(self, value):
"""
Sets user agent.
"""
self.default_headers['User-Agent'] = value
def set_default_header(self, header_name, header_value):
self.default_headers[header_name] = header_value
def __call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None):
# headers parameters
header_params = header_params or {}
header_params.update(self.default_headers)
if self.cookie:
header_params['Cookie'] = self.cookie
if header_params:
header_params = self.sanitize_for_serialization(header_params)
# path parameters
if path_params:
path_params = self.sanitize_for_serialization(path_params)
for k, v in iteritems(path_params):
replacement = quote(str(self.to_path_value(v)))
resource_path = resource_path.\
replace('{' + k + '}', replacement)
# query parameters
if query_params:
query_params = self.sanitize_for_serialization(query_params)
query_params = {k: self.to_path_value(v)
for k, v in iteritems(query_params)}
# post parameters
if post_params or files:
post_params = self.prepare_post_parameters(post_params, files)
post_params = self.sanitize_for_serialization(post_params)
# auth setting
self.update_params_for_auth(header_params, query_params, auth_settings)
# body
if body:
body = self.sanitize_for_serialization(body)
# request url
url = self.host + resource_path
# perform request and return response
response_data = self.request(method, url,
query_params=query_params,
headers=header_params,
post_params=post_params, body=body)
self.last_response = response_data
# deserialize response data
if response_type:
deserialized_data = self.deserialize(response_data, response_type)
else:
deserialized_data = None
if callback:
callback(deserialized_data)
else:
return deserialized_data
def to_path_value(self, obj):
"""
        Takes a value and turns it into a string suitable for inclusion in
the path, by url-encoding.
:param obj: object or string value.
:return string: quoted value.
"""
if type(obj) == list:
return ','.join(obj)
else:
return str(obj)
def sanitize_for_serialization(self, obj):
"""
Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is swagger model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
types = (str, int, float, bool, tuple)
if sys.version_info < (3,0):
types = types + (unicode,)
if isinstance(obj, type(None)):
return None
elif isinstance(obj, types):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, (datetime, date)):
return obj.isoformat()
else:
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `swagger_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr)
for attr, _ in iteritems(obj.swagger_types)
if getattr(obj, attr) is not None}
return {key: self.sanitize_for_serialization(val)
for key, val in iteritems(obj_dict)}
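    # Example (illustrative): sanitize_for_serialization({'when': date(2016, 1, 1)})
    # returns {'when': '2016-01-01'}, while a Swagger model instance is reduced to a
    # dict keyed by the names in its attribute_map.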
def deserialize(self, response, response_type):
"""
Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
            deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if "file" == response_type:
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
def __deserialize(self, data, klass):
"""
Deserializes dict, list, str into an object.
:param data: dict, list or str.
:param klass: class literal, or string of class name.
:return: object.
"""
if data is None:
return None
if type(klass) == str:
if klass.startswith('list['):
sub_kls = re.match('list\[(.*)\]', klass).group(1)
return [self.__deserialize(sub_data, sub_kls)
for sub_data in data]
if klass.startswith('dict('):
sub_kls = re.match('dict\(([^,]*), (.*)\)', klass).group(2)
return {k: self.__deserialize(v, sub_kls)
for k, v in iteritems(data)}
# convert str to class
# for native types
if klass in ['int', 'float', 'str', 'bool',
"date", 'datetime', "object"]:
klass = eval(klass)
# for model types
else:
klass = eval('models.' + klass)
if klass in [int, float, str, bool]:
return self.__deserialize_primitive(data, klass)
elif klass == object:
return self.__deserialize_object(data)
elif klass == date:
return self.__deserialize_date(data)
elif klass == datetime:
            return self.__deserialize_datetime(data)
else:
return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_type=None, auth_settings=None, callback=None):
"""
Makes the HTTP request (synchronous) and return the deserialized data.
To make an async request, define a function for callback.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
        :param response_type: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param callback function: Callback function for asynchronous request.
If provide this parameter,
the request will be called asynchronously.
:return:
If provide parameter callback,
the request will be called asynchronously.
The method will return the request thread.
If parameter callback is None,
then the method will return the response directly.
"""
if callback is None:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_type, auth_settings, callback)
else:
thread = threading.Thread(target=self.__call_api,
args=(resource_path, method,
path_params, query_params,
header_params, body,
post_params, files,
response_type, auth_settings,
callback))
thread.start()
return thread
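    # Usage sketch (endpoint, parameters and response type are hypothetical):
    #   client = ApiClient(host='https://example.com/api')
    #   name = client.call_api('/users/{id}', 'GET',
    #                          path_params={'id': 42},
    #                          response_type='str',
    #                          auth_settings=[])
    # Passing callback=some_function instead runs the call in a background thread
    # and returns that thread.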
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None):
"""
Makes the HTTP request using RESTClient.
"""
if method == "GET":
return self.rest_client.GET(url,
query_params=query_params,
headers=headers)
elif method == "HEAD":
return self.rest_client.HEAD(url,
query_params=query_params,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.OPTIONS(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "POST":
return self.rest_client.POST(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PUT":
return self.rest_client.PUT(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "PATCH":
return self.rest_client.PATCH(url,
query_params=query_params,
headers=headers,
post_params=post_params,
body=body)
elif method == "DELETE":
return self.rest_client.DELETE(url,
query_params=query_params,
headers=headers,
body=body)
else:
raise ValueError(
"http method must be `GET`, `HEAD`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def prepare_post_parameters(self, post_params=None, files=None):
"""
Builds form parameters.
:param post_params: Normal form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if post_params:
params = post_params
if files:
for k, v in iteritems(files):
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = mimetypes.\
guess_type(filename)[0] or 'application/octet-stream'
params.append(tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""
Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
accepts = list(map(lambda x: x.lower(), accepts))
if 'application/json' in accepts:
return 'application/json'
else:
return ', '.join(accepts)
def select_header_content_type(self, content_types):
"""
Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return 'application/json'
content_types = list(map(lambda x: x.lower(), content_types))
if 'application/json' in content_types:
return 'application/json'
else:
return content_types[0]
def update_params_for_auth(self, headers, querys, auth_settings):
"""
Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param querys: Query parameters dict to be updated.
:param auth_settings: Authentication setting identifiers list.
"""
config = Configuration()
if not auth_settings:
return
for auth in auth_settings:
auth_setting = config.auth_settings().get(auth)
if auth_setting:
if not auth_setting['value']:
continue
elif auth_setting['in'] == 'header':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
querys[auth_setting['key']] = auth_setting['value']
else:
raise ValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
config = Configuration()
fd, path = tempfile.mkstemp(dir=config.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.\
search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).\
group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "w") as f:
f.write(response.data)
return path
def __deserialize_primitive(self, data, klass):
"""
Deserializes string to primitive type.
:param data: str.
:param klass: class literal.
:return: int, float, str, bool.
"""
try:
value = klass(data)
except UnicodeEncodeError:
value = unicode(data)
except TypeError:
value = data
return value
def __deserialize_object(self, value):
"""
        Return the original value unchanged.
:return: object.
"""
return value
def __deserialize_date(self, string):
"""
Deserializes string to date.
:param string: str.
:return: date.
"""
try:
from dateutil.parser import parse
return parse(string).date()
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a date object"
.format(string)
)
def __deserialize_datatime(self, string):
"""
Deserializes string to datetime.
        The string should be in ISO 8601 datetime format.
:param string: str.
:return: datetime.
"""
try:
from dateutil.parser import parse
return parse(string)
except ImportError:
return string
except ValueError:
raise ApiException(
status=0,
reason="Failed to parse `{0}` into a datetime object".
format(string)
)
def __deserialize_model(self, data, klass):
"""
Deserializes list or dict to model.
:param data: dict, list.
:param klass: class literal.
:return: model object.
"""
instance = klass()
for attr, attr_type in iteritems(instance.swagger_types):
if data is not None \
and instance.attribute_map[attr] in data\
and isinstance(data, (list, dict)):
value = data[instance.attribute_map[attr]]
setattr(instance, attr, self.__deserialize(value, attr_type))
return instance
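# Illustrative sketch (not part of the generated client): the header-selection
# helpers above prefer `application/json` whenever it is offered; `Accept`
# otherwise falls back to joining all values and `Content-Type` falls back to
# the first value. The standalone helper below mirrors the Content-Type rule so
# the behaviour can be checked without constructing an ApiClient instance.
def _example_select_content_type(content_types):
    if not content_types:
        return 'application/json'
    lowered = [c.lower() for c in content_types]
    if 'application/json' in lowered:
        return 'application/json'
    return lowered[0]


def _example_check_content_type_selection():
    assert _example_select_content_type([]) == 'application/json'
    assert _example_select_content_type(
        ['application/xml', 'application/json']) == 'application/json'
    assert _example_select_content_type(
        ['text/plain', 'text/html']) == 'text/plain'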
|
wavefront-mike/python-client
|
wavefront_client/api_client.py
|
Python
|
apache-2.0
| 20,266 |
import os
import re
import json
def createSpeciesJson(source_data_path):
# create the species.json file using data from the specified path
    # traverse directories looking for dirs named "1km". If a dir's
    # path matches this pattern:
    #     .../<taxon-name>/species/<species-name>/1km
# then record that as a species / taxon record.
# here's a regex to test for species dirs:
one_km_regex = re.compile(r'/(\w+)/species/(\w+)/1km$')
# we'll get the species common name from here:
common_names = {}
cn_file = os.path.join(os.path.dirname(__file__), 'all_species.json')
try:
# try reading in the list of sci-to-common species names
with open(cn_file) as f:
common_names = json.load(f)
    except (IOError, ValueError):
        # give up on common names if we can't read or parse them
common_names = {}
#
# okay, ready to check for modelled species
#
species_list = {}
for dir, subdirs, files in os.walk(source_data_path):
match = one_km_regex.search(dir)
if match:
taxon = match.group(1)
sci_name = match.group(2).replace('_', ' ')
species_list[sci_name] = {
"commonNames": common_names.get(sci_name, [""]),
"group": taxon
}
# if we found a species dir, we don't need to keep
# os.walk()ing into its descendent dirs
subdirs[:] = []
# now save our species list
json_path = os.path.join(os.path.dirname(__file__), 'species.json')
with open(json_path, 'w') as json_file:
json.dump(species_list, json_file, sort_keys = True, indent = 4)
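# Illustrative sketch (not part of this module): the directory-matching rule
# used above, applied to a hypothetical path. A directory such as
# ".../mammals/species/Petaurus_breviceps/1km" yields taxon "mammals" and
# species "Petaurus breviceps"; the common name shown is made up for the
# example.
def _example_species_match():
    example_regex = re.compile(r'/(\w+)/species/(\w+)/1km$')
    match = example_regex.search(
        '/data/source/mammals/species/Petaurus_breviceps/1km')
    assert match is not None
    taxon = match.group(1)
    sci_name = match.group(2).replace('_', ' ')
    assert taxon == 'mammals'
    assert sci_name == 'Petaurus breviceps'
    return {sci_name: {"commonNames": ["Sugar Glider"], "group": taxon}}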
def createBiodiversityJson(source_data_path):
# create the biodiversity.json file using data from the specified path
# traverse directories looking for "deciles" dirs.
# If a dir's path matches this pattern:
# .../<taxon-name>/biodiversity/deciles
# then record that as a taxon / biodiversity record.
# here's a regex to test for biodiv dirs:
biodiv_regex = re.compile(r'/(\w+)/biodiversity/deciles$')
biodiv_list = {}
for dir, subdirs, files in os.walk(source_data_path):
match = biodiv_regex.search(dir)
if match:
taxon = match.group(1)
biodiv_list[taxon] = {
"group": taxon
}
# if we found a biodiv dir, we don't need to keep
# os.walk()ing into its descendent dirs
subdirs[:] = []
# now save our biodiv list
json_path = os.path.join(os.path.dirname(__file__), 'biodiversity.json')
with open(json_path, 'w') as json_file:
json.dump(biodiv_list, json_file, sort_keys = True, indent = 4)
|
jcu-eresearch/climas-ng
|
webapp/climasng/data/datafinder.py
|
Python
|
apache-2.0
| 2,719 |
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0006_updater_updaterevent'),
('fanfunding', '0002_auto_20160416_0621'),
]
operations = [
migrations.RenameField(
model_name='fanfundingevent',
old_name='ffu',
new_name='updater',
),
migrations.RemoveField(
model_name='fanfundingevent',
name='funding_id',
),
migrations.RemoveField(
model_name='fanfundingevent',
name='id',
),
migrations.RemoveField(
model_name='fanfundingupdate',
name='failure_count',
),
migrations.RemoveField(
model_name='fanfundingupdate',
name='id',
),
migrations.RemoveField(
model_name='fanfundingupdate',
name='last_failure',
),
migrations.RemoveField(
model_name='fanfundingupdate',
name='last_failure_message',
),
migrations.RemoveField(
model_name='fanfundingupdate',
name='last_update',
),
migrations.AddField(
model_name='fanfundingevent',
name='updaterevent_ptr',
field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=1, serialize=False, to='main.UpdaterEvent'),
preserve_default=False,
),
migrations.AddField(
model_name='fanfundingupdate',
name='updater_ptr',
field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=1, serialize=False, to='main.Updater'),
preserve_default=False,
),
]
|
google/mirandum
|
alerts/fanfunding/migrations/0003_auto_20160416_2023.py
|
Python
|
apache-2.0
| 2,470 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provider for running jobs on Google Cloud Platform.
This module implements job creation, listing, and canceling using the
Google Genomics Pipelines and Operations APIs.
"""
# pylint: disable=g-tzinfo-datetime
from datetime import datetime
import itertools
import json
import os
import re
import socket
import string
import sys
import textwrap
from . import base
from .._dsub_version import DSUB_VERSION
import apiclient.discovery
import apiclient.errors
from ..lib import param_util
from ..lib import providers_util
from oauth2client.client import GoogleCredentials
from oauth2client.client import HttpAccessTokenRefreshError
import pytz
import retrying
_PROVIDER_NAME = 'google'
# Create file provider whitelist.
_SUPPORTED_FILE_PROVIDERS = frozenset([param_util.P_GCS])
_SUPPORTED_LOGGING_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
_SUPPORTED_INPUT_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
_SUPPORTED_OUTPUT_PROVIDERS = _SUPPORTED_FILE_PROVIDERS
# Environment variable name for the script body
SCRIPT_VARNAME = '_SCRIPT'
# Mount point for the data disk on the VM and in the Docker container
DATA_MOUNT_POINT = '/mnt/data'
# Special dsub directories within the Docker container
#
# Attempt to keep the dsub runtime environment sane by being very prescriptive.
# Assume a single disk for everything that needs to be written by the dsub
# runtime environment or the user.
#
# Backends like the Google Pipelines API allow the user to set both
# a boot-disk-size and a disk-size. But the boot-disk-size is not something
# that users should have to worry about, so don't put anything extra there.
#
# Put everything meaningful on the data disk:
#
# input: files localized from object storage
# output: files to de-localize to object storage
#
# script: any code that dsub writes (like the user script)
# tmp: set TMPDIR in the environment to point here
#
# workingdir: A workspace directory for user code.
# This is also the explicit working directory set before the
# user script runs.
SCRIPT_DIR = '%s/script' % DATA_MOUNT_POINT
TMP_DIR = '%s/tmp' % DATA_MOUNT_POINT
WORKING_DIR = '%s/workingdir' % DATA_MOUNT_POINT
MK_RUNTIME_DIRS_COMMAND = '\n'.join(
'mkdir --mode=777 -p "%s" ' % dir
for dir in [SCRIPT_DIR, TMP_DIR, WORKING_DIR])
DOCKER_COMMAND = textwrap.dedent("""\
set -o errexit
set -o nounset
# Create runtime directories
{mk_runtime_dirs}
# Write the script to a file and make it executable
echo "${{_SCRIPT}}" > "{script_path}"
chmod u+x "{script_path}"
# Install gsutil if there are recursive copies to do
{install_cloud_sdk}
# Set environment variables for inputs with wildcards
{export_inputs_with_wildcards}
# Set environment variables for recursive input directories
{export_input_dirs}
# Recursive copy input directories
{copy_input_dirs}
# Create the output directories
{mk_output_dirs}
# Set environment variables for recursive output directories
{export_output_dirs}
# Set TMPDIR
export TMPDIR="{tmpdir}"
# DEPRECATED: do not use DATA_ROOT
export DATA_ROOT=/mnt/data
# Set the working directory
cd "{working_dir}"
# Run the user script
"{script_path}"
# Recursive copy output directories
{copy_output_dirs}
""")
# If an output directory is marked as "recursive", then dsub takes over the
# responsibilities of de-localizing that output directory.
#
# If the docker image already has gsutil in it, then we just use it.
# For large numbers of pipelines that utilize the recursive output feature,
# including Cloud SDK in the docker image is generally preferred.
#
# When one is just getting started with their pipeline, adding Cloud SDK
# installation in their docker image should not be a requirement.
INSTALL_CLOUD_SDK = textwrap.dedent("""\
if ! type gsutil; then
apt-get update
apt-get --yes install ca-certificates gcc gnupg2 python-dev python-setuptools
easy_install -U pip
pip install -U crcmod
apt-get --yes install lsb-release
export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)"
echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" >> /etc/apt/sources.list.d/google-cloud-sdk.list
apt-get update && apt-get --yes install curl
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
apt-get update && apt-get --yes install google-cloud-sdk
fi
""")
# Transient errors for the Google APIs should not cause them to fail.
# There are a set of HTTP and socket errors which we automatically retry.
# 429: too frequent polling
# 50x: backend error
TRANSIENT_HTTP_ERROR_CODES = set([429, 500, 503, 504])
# Socket error 104 (connection reset) should also be retried
TRANSIENT_SOCKET_ERROR_CODES = set([104])
# When attempting to cancel an operation that is already completed
# (succeeded, failed, or canceled), the response will include:
# "error": {
# "code": 400,
# "status": "FAILED_PRECONDITION",
# }
FAILED_PRECONDITION_CODE = 400
FAILED_PRECONDITION_STATUS = 'FAILED_PRECONDITION'
# List of Compute Engine zones, which enables simple wildcard expansion.
# We could look this up dynamically, but new zones come online
# infrequently enough, this is easy to keep up with.
# Also - the Pipelines API may one day directly support zone wildcards.
#
# To refresh this list:
# gcloud compute zones list --format='value(name)' \
# | sort | awk '{ printf " '\''%s'\'',\n", $1 }'
_ZONES = [
'asia-east1-a',
'asia-east1-b',
'asia-east1-c',
'asia-northeast1-a',
'asia-northeast1-b',
'asia-northeast1-c',
'asia-southeast1-a',
'asia-southeast1-b',
'australia-southeast1-a',
'australia-southeast1-b',
'australia-southeast1-c',
'europe-west1-b',
'europe-west1-c',
'europe-west1-d',
'europe-west2-a',
'europe-west2-b',
'europe-west2-c',
'europe-west3-a',
'europe-west3-b',
'europe-west3-c',
'southamerica-east1-a',
'southamerica-east1-b',
'southamerica-east1-c',
'us-central1-a',
'us-central1-b',
'us-central1-c',
'us-central1-f',
'us-east1-b',
'us-east1-c',
'us-east1-d',
'us-east4-a',
'us-east4-b',
'us-east4-c',
'us-west1-a',
'us-west1-b',
'us-west1-c',
]
def _get_zones(input_list):
"""Returns a list of zones based on any wildcard input.
This function is intended to provide an easy method for producing a list
of desired zones for a pipeline to run in.
The Pipelines API default zone list is "any zone". The problem with
"any zone" is that it can lead to incurring Cloud Storage egress charges
if the GCE zone selected is in a different region than the GCS bucket.
See https://cloud.google.com/storage/pricing#network-egress.
  A user with a multi-region US bucket would want pipelines to run in
a "us-*" zone.
A user with a regional bucket in US would want to restrict pipelines to
run in a zone in that region.
Rarely does the specific zone matter for a pipeline.
This function allows for a simple short-hand such as:
[ "us-*" ]
[ "us-central1-*" ]
These examples will expand out to the full list of US and us-central1 zones
respectively.
Args:
input_list: list of zone names/patterns
Returns:
A list of zones, with any wildcard zone specifications expanded.
"""
output_list = []
for zone in input_list:
if zone.endswith('*'):
prefix = zone[:-1]
output_list.extend([z for z in _ZONES if z.startswith(prefix)])
else:
output_list.append(zone)
return output_list
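# Illustrative sketch: expanding a wildcard zone specification against the
# static _ZONES list above. "us-central1-*" expands to the four us-central1
# zones while an explicit zone name passes through unchanged.
def _example_zone_expansion():
  zones = _get_zones(['us-central1-*', 'europe-west1-b'])
  assert zones == ['us-central1-a', 'us-central1-b', 'us-central1-c',
                   'us-central1-f', 'europe-west1-b']
  return zones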
def _print_error(msg):
"""Utility routine to emit messages to stderr."""
print >> sys.stderr, msg
class _Label(param_util.LabelParam):
"""Name/value label metadata for a pipeline.
Attributes:
name (str): the label name.
value (str): the label value (optional).
"""
_allow_reserved_keys = True
__slots__ = ()
@staticmethod
def convert_to_label_chars(s):
"""Turn the specified name and value into a valid Google label."""
# We want the results to be user-friendly, not just functional.
# So we can't base-64 encode it.
# * If upper-case: lower-case it
    # * If the char is not a standard letter or digit, make it a dash
accepted_characters = string.ascii_lowercase + string.digits + '-'
def label_char_transform(char):
if char in accepted_characters:
return char
if char in string.ascii_uppercase:
return char.lower()
return '-'
return ''.join(label_char_transform(c) for c in s)
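# Illustrative sketch: label normalization as implemented above. Upper-case
# letters are lower-cased and any character that is not a lower-case letter,
# digit, or dash becomes a dash.
def _example_label_normalization():
  assert _Label.convert_to_label_chars('My Job.Name') == 'my-job-name'
  assert _Label.convert_to_label_chars('v0.1.3') == 'v0-1-3'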
def _retry_api_check(exception):
"""Return True if we should retry. False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
"""
_print_error('Exception %s: %s' % (type(exception).__name__, str(exception)))
if isinstance(exception, apiclient.errors.HttpError):
if exception.resp.status in TRANSIENT_HTTP_ERROR_CODES:
return True
if isinstance(exception, socket.error):
if exception.errno in TRANSIENT_SOCKET_ERROR_CODES:
return True
if isinstance(exception, HttpAccessTokenRefreshError):
return True
return False
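# Illustrative sketch: how the retry predicate classifies errors. A reset
# connection (errno 104) is treated as transient and retried, while an
# ordinary ValueError is not.
def _example_retry_classification():
  assert _retry_api_check(socket.error(104, 'Connection reset by peer'))
  assert not _retry_api_check(ValueError('malformed request body'))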
class _Api(object):
# Exponential backoff retrying API execution.
# Maximum 23 retries. Wait 1, 2, 4 ... 64, 64, 64... seconds.
@staticmethod
@retrying.retry(
stop_max_attempt_number=23,
retry_on_exception=_retry_api_check,
wait_exponential_multiplier=1000,
wait_exponential_max=64000)
def execute(api):
return api.execute()
class _Pipelines(object):
"""Utilty methods for creating pipeline operations."""
@classmethod
def _build_pipeline_input_file_param(cls, var_name, docker_path):
"""Return a dict object representing a pipeline input argument."""
# If the filename contains a wildcard, then the target Docker path must
# be a directory in order to ensure consistency whether the source pattern
# contains 1 or multiple files.
#
# In that case, we set the docker_path to explicitly have a trailing slash
    # (for the Pipelines API "gsutil cp" handling), and then override the
# associated var_name environment variable in the generated Docker command.
path, filename = os.path.split(docker_path)
if '*' in filename:
return cls._build_pipeline_file_param(var_name, path + '/')
else:
return cls._build_pipeline_file_param(var_name, docker_path)
@classmethod
def _build_pipeline_file_param(cls, var_name, docker_path):
"""Return a dict object representing a pipeline input or output argument."""
return {
'name': var_name,
'localCopy': {
'path': docker_path,
'disk': 'datadisk'
}
}
@classmethod
def _build_pipeline_docker_command(cls, script_name, inputs, outputs):
"""Return a multi-line string of the full pipeline docker command."""
# We upload the user script as an environment argument
# and write it to SCRIPT_DIR (preserving its local file name).
#
# The docker_command:
# * writes the script body to a file
# * installs gcloud if there are recursive copies to do
# * sets environment variables for inputs with wildcards
# * sets environment variables for recursive input directories
# * recursively copies input directories
# * creates output directories
# * sets environment variables for recursive output directories
# * sets the DATA_ROOT environment variable to /mnt/data
# * sets the working directory to ${DATA_ROOT}
# * executes the user script
# * recursively copies output directories
recursive_input_dirs = [var for var in inputs if var.recursive]
recursive_output_dirs = [var for var in outputs if var.recursive]
install_cloud_sdk = ''
if recursive_input_dirs or recursive_output_dirs:
install_cloud_sdk = INSTALL_CLOUD_SDK
export_input_dirs = ''
copy_input_dirs = ''
if recursive_input_dirs:
export_input_dirs = providers_util.build_recursive_localize_env(
DATA_MOUNT_POINT, inputs)
copy_input_dirs = providers_util.build_recursive_localize_command(
DATA_MOUNT_POINT, inputs, param_util.P_GCS)
export_output_dirs = ''
copy_output_dirs = ''
if recursive_output_dirs:
export_output_dirs = providers_util.build_recursive_gcs_delocalize_env(
DATA_MOUNT_POINT, outputs)
copy_output_dirs = providers_util.build_recursive_delocalize_command(
DATA_MOUNT_POINT, outputs, param_util.P_GCS)
mkdirs = '\n'.join([
'mkdir -p {0}/{1}'.format(DATA_MOUNT_POINT, var.docker_path if
var.recursive else
os.path.dirname(var.docker_path))
for var in outputs
])
export_inputs_with_wildcards = ''
inputs_with_wildcards = [
var for var in inputs
if not var.recursive and '*' in os.path.basename(var.docker_path)
]
export_inputs_with_wildcards = '\n'.join([
'export {0}="{1}/{2}"'.format(var.name, DATA_MOUNT_POINT,
var.docker_path)
for var in inputs_with_wildcards
])
return DOCKER_COMMAND.format(
mk_runtime_dirs=MK_RUNTIME_DIRS_COMMAND,
script_path='%s/%s' % (SCRIPT_DIR, script_name),
install_cloud_sdk=install_cloud_sdk,
export_inputs_with_wildcards=export_inputs_with_wildcards,
export_input_dirs=export_input_dirs,
copy_input_dirs=copy_input_dirs,
mk_output_dirs=mkdirs,
export_output_dirs=export_output_dirs,
tmpdir=TMP_DIR,
working_dir=WORKING_DIR,
copy_output_dirs=copy_output_dirs)
@classmethod
def build_pipeline(cls, project, min_cores, min_ram, disk_size,
boot_disk_size, preemptible, image, zones, script_name,
envs, inputs, outputs, pipeline_name):
"""Builds a pipeline configuration for execution.
Args:
project: string name of project.
min_cores: int number of CPU cores required per job.
min_ram: int GB of RAM required per job.
disk_size: int GB of disk to attach under /mnt/data.
boot_disk_size: int GB of disk for boot.
preemptible: use a preemptible VM for the job
image: string Docker image name in which to run.
zones: list of zone names for jobs to be run at.
script_name: file name of the script to run.
envs: list of EnvParam objects specifying environment variables to set
within each job.
inputs: list of FileParam objects specifying input variables to set
within each job.
outputs: list of FileParam objects specifying output variables to set
within each job.
pipeline_name: string name of pipeline.
Returns:
      A nested dictionary with one entry under the key ephemeralPipeline
containing the pipeline configuration.
"""
# Format the docker command
docker_command = cls._build_pipeline_docker_command(script_name, inputs,
outputs)
# Pipelines inputParameters can be both simple name/value pairs which get
# set as environment variables, as well as input file paths which the
# Pipelines controller will automatically localize to the Pipeline VM.
# In the ephemeralPipeline object, the inputParameters are only defined;
# the values are passed in the pipelineArgs.
# Pipelines outputParameters are only output file paths, which the
# Pipelines controller can automatically de-localize after the docker
# command completes.
# The Pipelines API does not support recursive copy of file parameters,
# so it is implemented within the dsub-generated pipeline.
# Any inputs or outputs marked as "recursive" are completely omitted here;
# their environment variables will be set in the docker command, and
# recursive copy code will be generated there as well.
input_envs = [{
'name': SCRIPT_VARNAME
}] + [{
'name': env.name
} for env in envs]
input_files = [
cls._build_pipeline_input_file_param(var.name, var.docker_path)
for var in inputs if not var.recursive
]
# Outputs are an array of file parameters
output_files = [
cls._build_pipeline_file_param(var.name, var.docker_path)
for var in outputs if not var.recursive
]
# The ephemeralPipeline provides the template for the pipeline.
# pyformat: disable
return {
'ephemeralPipeline': {
'projectId': project,
'name': pipeline_name,
# Define the resources needed for this pipeline.
'resources': {
'minimumCpuCores': min_cores,
'minimumRamGb': min_ram,
'bootDiskSizeGb': boot_disk_size,
'preemptible': preemptible,
# Create a data disk that is attached to the VM and destroyed
# when the pipeline terminates.
'zones': _get_zones(zones),
'disks': [{
'name': 'datadisk',
'autoDelete': True,
'sizeGb': disk_size,
'mountPoint': DATA_MOUNT_POINT,
}],
},
'inputParameters': input_envs + input_files,
'outputParameters': output_files,
'docker': {
'imageName': image,
'cmd': docker_command,
}
}
}
# pyformat: enable
@classmethod
def build_pipeline_args(cls, project, script, job_data, task_data,
preemptible, logging_uri, scopes, keep_alive):
"""Builds pipeline args for execution.
Args:
project: string name of project.
script: Body of the script to execute.
job_data: dictionary of values for labels, envs, inputs, and outputs for
this job.
task_data: dictionary of values for labels, envs, inputs, and outputs for
this task.
preemptible: use a preemptible VM for the job
logging_uri: path for job logging output.
      scopes: list of scopes.
keep_alive: Seconds to keep VM alive on failure
Returns:
A nested dictionary with one entry under the key pipelineArgs containing
the pipeline arguments.
"""
# For the Pipelines API, envs and file inputs are all "inputs".
inputs = {}
inputs.update({SCRIPT_VARNAME: script})
inputs.update(
{var.name: var.value
for var in job_data['envs'] + task_data['envs']})
inputs.update({
var.name: var.uri
for var in job_data['inputs'] + task_data['inputs'] if not var.recursive
})
# Remove wildcard references for non-recursive output. When the pipelines
# controller generates a delocalize call, it must point to a bare directory
# for patterns. The output param OUTFILE=gs://bucket/path/*.bam should
# delocalize with a call similar to:
# gsutil cp /mnt/data/output/gs/bucket/path/*.bam gs://bucket/path/
outputs = {}
for var in job_data['outputs'] + task_data['outputs']:
if var.recursive:
continue
if '*' in var.uri.basename:
outputs[var.name] = var.uri.path
else:
outputs[var.name] = var.uri
labels = {}
labels.update({
label.name: label.value if label.value else ''
for label in job_data['labels'] + task_data['labels']
})
# pyformat: disable
args = {
'pipelineArgs': {
'projectId': project,
'resources': {
'preemptible': preemptible,
},
'inputs': inputs,
'outputs': outputs,
'labels': labels,
'serviceAccount': {
'email': 'default',
'scopes': scopes,
},
# Pass the user-specified GCS destination for pipeline logging.
'logging': {
'gcsPath': logging_uri
},
}
}
# pyformat: enable
if keep_alive:
args['pipelineArgs'][
'keep_vm_alive_on_failure_duration'] = '%ss' % keep_alive
return args
@staticmethod
def run_pipeline(service, pipeline):
return _Api.execute(service.pipelines().run(body=pipeline))
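# Illustrative sketch (hypothetical variable name and path): building the
# input file parameter for a wildcard path. Because the filename contains "*",
# the docker path is reduced to a bare directory with a trailing slash so the
# controller's copy behaves the same for one or many matching files.
def _example_wildcard_input_param():
  param = _Pipelines._build_pipeline_input_file_param(
      'INPUT_BAMS', 'input/gs/my-bucket/samples/*.bam')
  assert param == {
      'name': 'INPUT_BAMS',
      'localCopy': {
          'path': 'input/gs/my-bucket/samples/',
          'disk': 'datadisk'
      }
  }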
class _Operations(object):
"""Utilty methods for querying and canceling pipeline operations."""
@staticmethod
def get_filter(project,
status=None,
user_id=None,
job_id=None,
job_name=None,
labels=None,
task_id=None,
create_time=None):
"""Return a filter string for operations.list()."""
ops_filter = []
ops_filter.append('projectId = %s' % project)
if status and status != '*':
ops_filter.append('status = %s' % status)
if user_id != '*':
ops_filter.append('labels.user-id = %s' % user_id)
if job_id != '*':
ops_filter.append('labels.job-id = %s' % job_id)
if job_name != '*':
ops_filter.append('labels.job-name = %s' % job_name)
if task_id != '*':
ops_filter.append('labels.task-id = %s' % task_id)
# Even though labels are nominally 'arbitrary strings' they are trusted
# since param_util restricts the character set.
if labels:
for l in labels:
ops_filter.append('labels.%s = %s' % (l.name, l.value))
if create_time:
ops_filter.append('createTime >= %s' % create_time)
return ' AND '.join(ops_filter)
@classmethod
def get_operation_type(cls, op):
return op.get('metadata', {}).get('request', {}).get('@type')
@classmethod
def get_operation_label(cls, op, name):
return op.get('metadata', {}).get('labels', {}).get(name)
@classmethod
def is_pipelines_operation(cls, op):
"""Check that an operation is a genomics pipeline run.
An operation is a Genomics Pipeline run if the request metadata's @type
is "type.googleapis.com/google.genomics.v1alpha2.RunPipelineRequest.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the operation is a RunPipelineRequest.
"""
return cls.get_operation_type(
op) == 'type.googleapis.com/google.genomics.v1alpha2.RunPipelineRequest'
@classmethod
def is_dsub_operation(cls, op):
"""Determine if a pipelines operation is a dsub request.
We don't have a rigorous way to identify an operation as being submitted
by dsub. Our best option is to check for certain fields that have always
been part of dsub operations.
- labels: job-id, job-name, and user-id have always existed
- envs: _SCRIPT has always existed.
In order to keep a simple heuristic this test only uses labels.
Args:
op: a pipelines operation.
Returns:
Boolean, true if the pipeline run was generated by dsub.
"""
if not cls.is_pipelines_operation(op):
return False
for name in ['job-id', 'job-name', 'user-id']:
if not cls.get_operation_label(op, name):
return False
return True
@classmethod
def list(cls, service, ops_filter, max_ops=0):
"""Gets the list of operations for the specified filter.
Args:
service: Google Genomics API service object
ops_filter: string filter of operations to return
max_ops: maximum number of operations to return (0 indicates no maximum)
Returns:
A list of operations matching the filter criteria.
"""
operations = []
page_token = None
page_size = None
while not max_ops or len(operations) < max_ops:
if max_ops:
# If a maximum number of operations is requested, limit the requested
# pageSize to the documented default (256) or less if we can.
page_size = min(max_ops - len(operations), 256)
api = service.operations().list(
name='operations',
filter=ops_filter,
pageToken=page_token,
pageSize=page_size)
response = _Api.execute(api)
ops = response['operations'] if 'operations' in response else None
if ops:
for op in ops:
if cls.is_dsub_operation(op):
operations.append(op)
# Exit if there are no more operations
if 'nextPageToken' not in response or not response['nextPageToken']:
break
page_token = response['nextPageToken']
if max_ops and len(operations) > max_ops:
del operations[max_ops:]
return [GoogleOperation(o) for o in operations]
@classmethod
def _cancel_batch(cls, service, ops):
"""Cancel a batch of operations.
Args:
service: Google Genomics API service object.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# We define an inline callback which will populate a list of
# successfully canceled operations as well as a list of operations
# which were not successfully canceled.
canceled = []
failed = []
def handle_cancel(request_id, response, exception):
"""Callback for the cancel response."""
del response # unused
if exception:
# We don't generally expect any failures here, except possibly trying
# to cancel an operation that is already canceled or finished.
#
# If the operation is already finished, provide a clearer message than
# "error 400: Bad Request".
msg = 'error %s: %s' % (exception.resp.status, exception.resp.reason)
if exception.resp.status == FAILED_PRECONDITION_CODE:
detail = json.loads(exception.content)
status = detail.get('error', {}).get('status')
if status == FAILED_PRECONDITION_STATUS:
msg = 'Not running'
failed.append({'name': request_id, 'msg': msg})
else:
canceled.append({'name': request_id})
return
# Set up the batch object
batch = service.new_batch_http_request(callback=handle_cancel)
# The callback gets a "request_id" which is the operation name.
# Build a dict such that after the callback, we can lookup the operation
# objects by name
ops_by_name = {}
for op in ops:
op_name = op.get_field('internal-id')
ops_by_name[op_name] = op
batch.add(
service.operations().cancel(name=op_name, body={}),
request_id=op_name)
# Cancel the operations
batch.execute()
# Iterate through the canceled and failed lists to build our return lists
canceled_ops = [ops_by_name[cancel['name']] for cancel in canceled]
error_messages = []
for fail in failed:
message = "Error canceling '%s': %s"
op = ops_by_name[fail['name']]
message %= (op.get_operation_full_job_id(), fail['msg'])
error_messages.append(message)
return canceled_ops, error_messages
@classmethod
def cancel(cls, service, ops):
"""Cancel operations.
Args:
service: Google Genomics API service object.
ops: A list of operations to cancel.
Returns:
A list of operations canceled and a list of error messages.
"""
# Canceling many operations one-by-one can be slow.
# The Pipelines API doesn't directly support a list of operations to cancel,
# but the requests can be performed in batch.
canceled_ops = []
error_messages = []
max_batch = 256
total_ops = len(ops)
for first_op in range(0, total_ops, max_batch):
batch_canceled, batch_messages = cls._cancel_batch(
service, ops[first_op:first_op + max_batch])
canceled_ops.extend(batch_canceled)
error_messages.extend(batch_messages)
return canceled_ops, error_messages
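# Illustrative sketch (hypothetical project and user): the operations.list()
# filter produced for a single (status, user, job) combination. Fields given
# as '*' are simply omitted from the filter string.
def _example_operations_filter():
  ops_filter = _Operations.get_filter(
      'my-project',
      status='RUNNING',
      user_id='alice',
      job_id='*',
      job_name='*',
      task_id='*')
  assert ops_filter == ('projectId = my-project AND status = RUNNING'
                        ' AND labels.user-id = alice')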
class GoogleJobProvider(base.JobProvider):
"""Interface to dsub and related tools for managing Google cloud jobs."""
def __init__(self, verbose, dry_run, project, zones=None, credentials=None):
self._verbose = verbose
self._dry_run = dry_run
self._project = project
self._zones = zones
self._service = self._setup_service(credentials)
# Exponential backoff retrying API discovery.
# Maximum 23 retries. Wait 1, 2, 4 ... 64, 64, 64... seconds.
@classmethod
@retrying.retry(
stop_max_attempt_number=23,
retry_on_exception=_retry_api_check,
wait_exponential_multiplier=1000,
wait_exponential_max=64000)
def _do_setup_service(cls, credentials):
return apiclient.discovery.build(
'genomics', 'v1alpha2', credentials=credentials)
@classmethod
def _setup_service(cls, credentials=None):
"""Configures genomics API client.
Args:
credentials: credentials to be used for the gcloud API calls.
Returns:
A configured Google Genomics API client with appropriate credentials.
"""
if not credentials:
credentials = GoogleCredentials.get_application_default()
return cls._do_setup_service(credentials)
def prepare_job_metadata(self, script, job_name, user_id):
"""Returns a dictionary of metadata fields for the job."""
# The name of the pipeline gets set into the ephemeralPipeline.name as-is.
# The default name of the pipeline is the script name
# The name of the job is derived from the job_name and gets set as a
# 'job-name' label (and so the value must be normalized).
if job_name:
pipeline_name = job_name
job_name_value = _Label.convert_to_label_chars(job_name)
else:
pipeline_name = os.path.basename(script)
job_name_value = _Label.convert_to_label_chars(
pipeline_name.split('.', 1)[0])
# The user-id will get set as a label
user_id = _Label.convert_to_label_chars(user_id)
# Now build the job-id. We want the job-id to be expressive while also
# having a low-likelihood of collisions.
#
# For expressiveness, we:
# * use the job name (truncated at 10 characters).
# * insert the user-id
# * add a datetime value
# To have a high likelihood of uniqueness, the datetime value is out to
# hundredths of a second.
#
# The full job-id is:
# <job-name>--<user-id>--<timestamp>
job_id = '%s--%s--%s' % (job_name_value[:10], user_id,
datetime.now().strftime('%y%m%d-%H%M%S-%f')[:16])
# Standard version is MAJOR.MINOR(.PATCH). This will convert the version
# string to "vMAJOR-MINOR(-PATCH)". Example; "0.1.0" -> "v0-1-0".
version = _Label.convert_to_label_chars('v%s' % DSUB_VERSION)
return {
'pipeline-name': pipeline_name,
'job-name': job_name_value,
'job-id': job_id,
'user-id': user_id,
'dsub-version': version,
}
def _build_pipeline_labels(self, task_metadata):
labels = [
_Label(name, task_metadata[name])
for name in ['job-name', 'job-id', 'user-id', 'dsub-version']
]
if task_metadata.get('task-id') is not None:
labels.append(_Label('task-id', 'task-%d' % task_metadata.get('task-id')))
return labels
def _build_pipeline_request(self, job_resources, task_metadata, job_data,
task_data):
"""Returns a Pipeline objects for the job."""
script = task_metadata['script']
task_data['labels'].extend(self._build_pipeline_labels(task_metadata))
# Build the ephemeralPipeline for this job.
# The ephemeralPipeline definition changes for each job because file
# parameters localCopy.path changes based on the remote_uri.
pipeline = _Pipelines.build_pipeline(
project=self._project,
min_cores=job_resources.min_cores,
min_ram=job_resources.min_ram,
disk_size=job_resources.disk_size,
boot_disk_size=job_resources.boot_disk_size,
preemptible=job_resources.preemptible,
image=job_resources.image,
zones=job_resources.zones,
script_name=script.name,
envs=job_data['envs'] + task_data['envs'],
inputs=job_data['inputs'] + task_data['inputs'],
outputs=job_data['outputs'] + task_data['outputs'],
pipeline_name=task_metadata['pipeline-name'])
# Build the pipelineArgs for this job.
logging_uri = providers_util.format_logging_uri(job_resources.logging.uri,
task_metadata)
pipeline.update(
_Pipelines.build_pipeline_args(self._project, script.value, job_data,
task_data, job_resources.preemptible,
logging_uri, job_resources.scopes,
job_resources.keep_alive))
return pipeline
def _submit_pipeline(self, request):
operation = _Pipelines.run_pipeline(self._service, request)
if self._verbose:
print 'Launched operation %s' % operation['name']
return GoogleOperation(operation).get_field('task-id')
def submit_job(self, job_resources, job_metadata, job_data, all_task_data):
"""Submit the job (or tasks) to be executed.
Args:
job_resources: resource parameters required by each job.
job_metadata: job parameters such as job-id, user-id, script
job_data: arguments global to the job
all_task_data: list of arguments for each task
Returns:
A dictionary containing the 'user-id', 'job-id', and 'task-id' list.
For jobs that are not task array jobs, the task-id list should be empty.
Raises:
ValueError: if job resources or task data contain illegal values.
"""
# Validate task data and resources.
param_util.validate_submit_args_or_fail(
job_resources,
job_data,
all_task_data,
provider_name=_PROVIDER_NAME,
input_providers=_SUPPORTED_INPUT_PROVIDERS,
output_providers=_SUPPORTED_OUTPUT_PROVIDERS,
logging_providers=_SUPPORTED_LOGGING_PROVIDERS)
# Prepare and submit jobs.
launched_tasks = []
requests = []
for task_data in all_task_data:
task_metadata = providers_util.get_task_metadata(job_metadata,
task_data.get('task-id'))
request = self._build_pipeline_request(job_resources, task_metadata,
job_data, task_data)
if self._dry_run:
requests.append(request)
else:
task = self._submit_pipeline(request)
if task:
launched_tasks.append(task)
# If this is a dry-run, emit all the pipeline request objects
if self._dry_run:
print json.dumps(requests, indent=2, sort_keys=True)
return {
'job-id': job_metadata['job-id'],
'user-id': job_metadata['user-id'],
'task-id': launched_tasks
}
def lookup_job_tasks(self,
status_list,
user_list=None,
job_list=None,
job_name_list=None,
task_list=None,
labels=None,
create_time=None,
max_tasks=0):
"""Return a list of operations based on the input criteria.
If any of the filters are empty or ["*"], then no filtering is performed on
that field. Filtering by both a job id list and job name list is
unsupported.
Args:
status_list: ['*'], or a list of job status strings to return. Valid
status strings are 'RUNNING', 'SUCCESS', 'FAILURE', or 'CANCELED'.
user_list: a list of ids for the user(s) who launched the job.
job_list: a list of job ids to return.
job_name_list: a list of job names to return.
task_list: a list of specific tasks within the specified job(s) to return.
labels: a list of LabelParam with user-added labels. All labels must
match the task being fetched.
create_time: a UTC value for earliest create time for a task.
max_tasks: the maximum number of job tasks to return or 0 for no limit.
Raises:
ValueError: if both a job id list and a job name list are provided
Returns:
A list of Genomics API Operations objects.
"""
# Server-side, we can filter on status, job_id, user_id, task_id, but there
# is no OR filter (only AND), and so we can't handle lists server side.
# In practice we don't expect combinations of user lists and job lists.
# For now, do the most brain-dead thing and if we find a common use-case
# that performs poorly, we can re-evaluate.
status_list = status_list if status_list else ['*']
user_list = user_list if user_list else ['*']
job_list = job_list if job_list else ['*']
job_name_list = job_name_list if job_name_list else ['*']
task_list = task_list if task_list else ['*']
# The task-id label value of "task-n" instead of just "n" is a hold-over
# from early label value character restrictions.
# Accept both forms, "task-n" and "n", for lookups by task-id.
task_list = ['task-{}'.format(t) if t.isdigit() else t for t in task_list]
if set(job_list) != set(['*']) and set(job_name_list) != set(['*']):
raise ValueError(
'Filtering by both job IDs and job names is not supported')
# AND filter rule arguments.
labels = labels if labels else []
tasks = []
for status, job_id, job_name, user_id, task_id in itertools.product(
status_list, job_list, job_name_list, user_list, task_list):
ops_filter = _Operations.get_filter(
self._project,
status=status,
user_id=user_id,
job_id=job_id,
job_name=job_name,
labels=labels,
task_id=task_id,
create_time=create_time)
ops = _Operations.list(self._service, ops_filter, max_tasks)
if ops:
tasks.extend(ops)
if max_tasks and len(tasks) > max_tasks:
del tasks[max_tasks:]
return tasks
return tasks
def delete_jobs(self,
user_list,
job_list,
task_list,
labels,
create_time=None):
"""Kills the operations associated with the specified job or job.task.
Args:
user_list: List of user ids who "own" the job(s) to cancel.
job_list: List of job_ids to cancel.
task_list: List of task-ids to cancel.
labels: List of LabelParam, each must match the job(s) to be canceled.
create_time: a UTC value for earliest create time for a task.
Returns:
A list of tasks canceled and a list of error messages.
"""
# Look up the job(s)
tasks = self.lookup_job_tasks(
['RUNNING'],
user_list=user_list,
job_list=job_list,
task_list=task_list,
labels=labels,
create_time=create_time)
print 'Found %d tasks to delete.' % len(tasks)
return _Operations.cancel(self._service, tasks)
def get_tasks_completion_messages(self, tasks):
completion_messages = []
for task in tasks:
errmsg = task.error_message()
completion_messages.append(errmsg)
return completion_messages
class GoogleOperation(base.Task):
"""Task wrapper around a Pipelines API operation object."""
def __init__(self, operation_data):
self._op = operation_data
# Sanity check for operation_status().
unused_status = self.operation_status()
def raw_task_data(self):
return self._op
def get_field(self, field, default=None):
"""Returns a value from the operation for a specific set of field names.
Args:
field: a dsub-specific job metadata key
default: default value to return if field does not exist or is empty.
Returns:
A text string for the field or a list for 'inputs'.
Raises:
ValueError: if the field label is not supported by the operation
"""
metadata = self._op.get('metadata')
value = None
if field == 'internal-id':
value = self._op['name']
elif field == 'job-name':
value = metadata['labels'].get('job-name')
elif field == 'job-id':
value = metadata['labels'].get('job-id')
elif field == 'task-id':
value = metadata['labels'].get('task-id')
elif field == 'user-id':
value = metadata['labels'].get('user-id')
elif field == 'task-status':
value = self.operation_status()
elif field == 'logging':
value = metadata['request']['pipelineArgs']['logging']['gcsPath']
elif field == 'envs':
value = self._get_operation_input_field_values(metadata, False)
elif field == 'labels':
# Reserved labels are filtered from dsub task output.
value = {k: v for k, v in metadata['labels'].items()
if k not in param_util.RESERVED_LABELS}
elif field == 'inputs':
value = self._get_operation_input_field_values(metadata, True)
elif field == 'outputs':
value = metadata['request']['pipelineArgs']['outputs']
elif field == 'create-time':
value = self._parse_datestamp(metadata['createTime'])
elif field == 'start-time':
# Look through the events list for all "start" events (only one expected).
start_events = [
e for e in metadata.get('events', []) if e['description'] == 'start'
]
# Get the startTime from the last "start" event.
if start_events:
value = self._parse_datestamp(start_events[-1]['startTime'])
elif field == 'end-time':
if 'endTime' in metadata:
value = self._parse_datestamp(metadata['endTime'])
elif field == 'status':
value = self.operation_status()
elif field in ['status-message', 'status-detail']:
status, last_update = self.operation_status_message()
value = status
elif field == 'last-update':
status, last_update = self.operation_status_message()
value = last_update
else:
raise ValueError('Unsupported field: "%s"' % field)
return value if value else default
def operation_status(self):
"""Returns the status of this operation.
    i.e. RUNNING, SUCCESS, CANCELED, or FAILURE.
Returns:
A printable status string
"""
if not self._op['done']:
return 'RUNNING'
if 'error' not in self._op:
return 'SUCCESS'
if self._op['error'].get('code', 0) == 1:
return 'CANCELED'
return 'FAILURE'
def operation_status_message(self):
"""Returns the most relevant status string and last updated date string.
This string is meant for display only.
Returns:
A printable status string and date string.
"""
metadata = self._op['metadata']
if not self._op['done']:
if 'events' in metadata and metadata['events']:
# Get the last event
last_event = metadata['events'][-1]
msg = last_event['description']
ds = last_event['startTime']
else:
msg = 'Pending'
ds = metadata['createTime']
else:
ds = metadata['endTime']
if 'error' in self._op:
        # Use the error message reported by the operation.
msg = self._op['error']['message']
else:
msg = 'Success'
return (msg, self._parse_datestamp(ds))
def get_operation_full_job_id(self):
"""Returns the job-id or job-id.task-id for the operation."""
job_id = self.get_field('job-id')
task_id = self.get_field('task-id')
if task_id:
return '%s.%s' % (job_id, task_id)
else:
return job_id
@staticmethod
def _parse_datestamp(datestamp):
"""Converts a datestamp from RFC3339 UTC to a datetime.
Args:
datestamp: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
"""
# The timestamp from the Google Operations are all in RFC3339 format, but
# they are sometimes formatted to nanoseconds and sometimes only seconds.
# Parse both:
# * 2016-11-14T23:04:55Z
# * 2016-11-14T23:05:56.010429380Z
# And any sub-second precision in-between.
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).*Z',
datestamp)
# It would be unexpected to get a different date format back from Google.
# If we raise an exception here, we can break people completely.
# Instead, let's just return None and people can report that some dates
# are not showing up.
if not m:
return None
# Create a UTC datestamp from parsed components
g = [int(val) for val in m.groups()]
return datetime(g[0], g[1], g[2], g[3], g[4], g[5], tzinfo=pytz.utc)
@classmethod
def _get_operation_input_field_values(cls, metadata, file_input):
"""Returns a dictionary of envs or file inputs for an operation.
Args:
metadata: operation metadata field
file_input: True to return a dict of file inputs, False to return envs.
Returns:
A dictionary of input field name value pairs
"""
# To determine input parameter type, we iterate through the
# pipeline inputParameters.
# The values come from the pipelineArgs inputs.
input_args = metadata['request']['ephemeralPipeline']['inputParameters']
vals_dict = metadata['request']['pipelineArgs']['inputs']
# Get the names for files or envs
names = [
arg['name'] for arg in input_args if ('localCopy' in arg) == file_input
]
# Build the return dict
return {name: vals_dict[name] for name in names if name in vals_dict}
def error_message(self):
"""Returns an error message if the operation failed for any reason.
    Failure as defined here means: ended for any reason other than 'success'.
    This means that a successful cancellation will also create an error
    message here.
Returns:
string, string will be empty if job did not error.
"""
if 'error' in self._op:
if 'task-id' in self._op['metadata']['labels']:
job_id = self._op['metadata']['labels']['task-id']
else:
job_id = self._op['metadata']['labels']['job-id']
return 'Error in job %s - code %s: %s' % (
job_id, self._op['error']['code'], self._op['error']['message'])
else:
return ''
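# Illustrative sketch: both RFC3339 timestamp shapes returned by the
# Operations API parse to the same timezone-aware datetime; sub-second
# precision is intentionally dropped.
def _example_parse_datestamp():
  short_form = GoogleOperation._parse_datestamp('2016-11-14T23:04:55Z')
  precise_form = GoogleOperation._parse_datestamp(
      '2016-11-14T23:05:56.010429380Z')
  assert short_form == datetime(2016, 11, 14, 23, 4, 55, tzinfo=pytz.utc)
  assert precise_form == datetime(2016, 11, 14, 23, 5, 56, tzinfo=pytz.utc)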
if __name__ == '__main__':
pass
|
e4p/dsub
|
dsub/providers/google.py
|
Python
|
apache-2.0
| 46,657 |
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509 as c_x509
from cryptography.x509.oid import NameOID
from magnum.common.x509 import operations
from magnum.tests import base
class TestX509(base.BaseTestCase):
def setUp(self):
super(TestX509, self).setUp()
self.issuer_name = six.u("fake-issuer")
self.subject_name = six.u("fake-subject")
self.ca_encryption_password = six.b("fake-ca-password")
self.encryption_password = six.b("fake-password")
def _load_pems(self, keypairs, encryption_password):
private_key = serialization.load_pem_private_key(
keypairs['private_key'],
password=encryption_password,
backend=default_backend(),
)
certificate = c_x509.load_pem_x509_certificate(
keypairs['certificate'], default_backend())
return certificate, private_key
def _generate_ca_certificate(self, issuer_name=None):
issuer_name = issuer_name or self.issuer_name
keypairs = operations.generate_ca_certificate(
issuer_name, encryption_password=self.ca_encryption_password)
return self._load_pems(keypairs, self.ca_encryption_password)
def _generate_client_certificate(self, issuer_name, subject_name):
ca = operations.generate_ca_certificate(
self.issuer_name, encryption_password=self.ca_encryption_password)
keypairs = operations.generate_client_certificate(
self.issuer_name,
self.subject_name,
ca['private_key'],
encryption_password=self.encryption_password,
ca_key_password=self.ca_encryption_password,
)
return self._load_pems(keypairs, self.encryption_password)
def _public_bytes(self, public_key):
return public_key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def _generate_private_key(self):
return rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
def _build_csr(self, private_key):
csr = c_x509.CertificateSigningRequestBuilder()
csr = csr.subject_name(c_x509.Name([
c_x509.NameAttribute(NameOID.COMMON_NAME, self.subject_name)
]))
return csr.sign(private_key, hashes.SHA256(), default_backend())
def assertHasPublicKey(self, keypairs):
key = keypairs[1]
cert = keypairs[0]
self.assertEqual(self._public_bytes(key.public_key()),
self._public_bytes(cert.public_key()))
def assertHasSubjectName(self, cert, subject_name):
actual_subject_name = cert.subject.get_attributes_for_oid(
c_x509.NameOID.COMMON_NAME)
actual_subject_name = actual_subject_name[0].value
self.assertEqual(subject_name, actual_subject_name)
def assertHasIssuerName(self, cert, issuer_name):
actual_issuer_name = cert.issuer.get_attributes_for_oid(
c_x509.NameOID.COMMON_NAME)
actual_issuer_name = actual_issuer_name[0].value
self.assertEqual(issuer_name, actual_issuer_name)
def assertInClientExtensions(self, cert):
key_usage = c_x509.KeyUsage(True, False, True, False, False, False,
False, False, False)
key_usage = c_x509.Extension(key_usage.oid, True, key_usage)
extended_key_usage = c_x509.ExtendedKeyUsage([c_x509.OID_CLIENT_AUTH])
extended_key_usage = c_x509.Extension(extended_key_usage.oid, False,
extended_key_usage)
basic_constraints = c_x509.BasicConstraints(ca=False, path_length=None)
basic_constraints = c_x509.Extension(basic_constraints.oid, True,
basic_constraints)
self.assertIn(key_usage, cert.extensions)
self.assertIn(extended_key_usage, cert.extensions)
self.assertIn(basic_constraints, cert.extensions)
def test_generate_ca_certificate_with_bytes_issuer_name(self):
issuer_name = six.b("bytes-issuer-name")
cert, _ = self._generate_ca_certificate(issuer_name)
issuer_name = six.u(issuer_name)
self.assertHasSubjectName(cert, issuer_name)
self.assertHasIssuerName(cert, issuer_name)
def test_generate_ca_certificate_has_publickey(self):
keypairs = self._generate_ca_certificate(self.issuer_name)
self.assertHasPublicKey(keypairs)
def test_generate_ca_certificate_set_subject_name(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
self.assertHasSubjectName(cert, self.issuer_name)
def test_generate_ca_certificate_set_issuer_name(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
self.assertHasIssuerName(cert, self.issuer_name)
def test_generate_ca_certificate_set_extentions_as_ca(self):
cert, _ = self._generate_ca_certificate(self.issuer_name)
key_usage = c_x509.KeyUsage(False, False, False, False, False, True,
False, False, False)
key_usage = c_x509.Extension(key_usage.oid, True, key_usage)
basic_constraints = c_x509.BasicConstraints(ca=True, path_length=0)
basic_constraints = c_x509.Extension(basic_constraints.oid, True,
basic_constraints)
self.assertIn(key_usage, cert.extensions)
self.assertIn(basic_constraints, cert.extensions)
def test_generate_client_certificate_has_publickey(self):
keypairs = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasPublicKey(keypairs)
def test_generate_client_certificate_set_subject_name(self):
cert, _ = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasSubjectName(cert, self.subject_name)
def test_generate_client_certificate_set_issuer_name(self):
cert, key = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertHasIssuerName(cert, self.issuer_name)
def test_generate_client_certificate_set_extentions_as_client(self):
cert, key = self._generate_client_certificate(
self.issuer_name, self.subject_name)
self.assertInClientExtensions(cert)
@mock.patch('cryptography.x509.load_pem_x509_csr')
@mock.patch('six.b')
def test_sign_with_unicode_csr(self, mock_six, mock_load_pem):
ca_key = self._generate_private_key()
private_key = self._generate_private_key()
csr_obj = self._build_csr(private_key)
csr = csr_obj.public_bytes(serialization.Encoding.PEM)
csr = six.u(csr)
mock_load_pem.return_value = csr_obj
operations.sign(csr, self.issuer_name, ca_key,
skip_validation=True)
mock_six.assert_called_once_with(csr)
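# Illustrative usage sketch (relies on the same operations module exercised by
# the tests above; the issuer name and password here are made up): generate a
# throwaway CA and load the returned PEM certificate.
def _example_generate_ca():
    keypairs = operations.generate_ca_certificate(
        six.u("example-ca"), encryption_password=six.b("example-pass"))
    return c_x509.load_pem_x509_certificate(
        keypairs['certificate'], default_backend())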
|
dimtruck/magnum
|
magnum/tests/unit/common/x509/test_sign.py
|
Python
|
apache-2.0
| 7,885 |
"""RFC 6962 client API."""
from ct.client import log_client
from ct.client.db import database
import gflags
import logging
import random
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import protocol
from twisted.internet import reactor as ireactor
from twisted.internet import task
from twisted.internet import threads
from twisted.python import failure
from twisted.web import client
from twisted.web import http
from twisted.web import iweb
from Queue import Queue
from zope.interface import implements
logging = logging.getLogger('async_log_client.py')
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("max_fetchers_in_parallel", 100, "Maximum number of "
"concurrent fetches.")
gflags.DEFINE_integer("get_entries_retry_delay", 1, "Number of seconds after "
"which get-entries will be retried if it encountered "
"an error.")
gflags.DEFINE_integer("entries_buffer", 100000, "Size of buffer which stores "
"fetched entries before async log client is able to "
"return them. 100000 entries shouldn't take more "
"than 600 Mb of memory.")
gflags.DEFINE_integer("response_buffer_size_bytes", 50 * 1000 * 1000, "Maximum "
"size of a single response buffer. Should be set such "
"that a get_entries response comfortably fits in the "
"the buffer. A typical log entry is expected to be < "
"10kB.")
gflags.DEFINE_bool("persist_entries", True, "Cache entries on disk.")
class HTTPConnectionError(log_client.HTTPError):
"""Connection failed."""
pass
class HTTPResponseSizeExceededError(log_client.HTTPError):
"""HTTP response exceeded maximum permitted size."""
pass
###############################################################################
# The asynchronous twisted log client. #
###############################################################################
class ResponseBodyHandler(protocol.Protocol):
"""Response handler for HTTP requests."""
def __init__(self, finished):
"""Initialize the one-off response handler.
Args:
finished: a deferred that will be fired with the body when the
complete response has been received; or with an error when the
connection is lost.
"""
self._finished = finished
def connectionMade(self):
self._buffer = []
self._len = 0
self._overflow = False
def dataReceived(self, data):
self._len += len(data)
if self._len > FLAGS.response_buffer_size_bytes:
# Note this flag has to be set *before* calling loseConnection()
# to ensure connectionLost gets called with the flag set.
self._overflow = True
self.transport.loseConnection()
else:
self._buffer.append(data)
def connectionLost(self, reason):
if self._overflow:
self._finished.errback(HTTPResponseSizeExceededError(
"Connection aborted: response size exceeded %d bytes" %
FLAGS.response_buffer_size_bytes))
elif not reason.check(*(error.ConnectionDone, client.ResponseDone,
http.PotentialDataLoss)):
self._finished.errback(HTTPConnectionError(
"Connection lost (received %d bytes)" % self._len))
else:
body = "".join(self._buffer)
self._finished.callback(body)
class AsyncRequestHandler(object):
"""A helper for asynchronous response body delivery."""
def __init__(self, agent):
self._agent = agent
@staticmethod
def _response_cb(response):
try:
log_client.RequestHandler.check_response_status(
response.code, response.phrase,
list(response.headers.getAllRawHeaders()))
except log_client.HTTPError as e:
return failure.Failure(e)
finished = defer.Deferred()
response.deliverBody(ResponseBodyHandler(finished))
return finished
@staticmethod
def _make_request(path, params):
if not params:
return path
return path + "?" + "&".join(["%s=%s" % (key, value)
for key, value in params.iteritems()])
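    # Illustrative only (path and values made up): _make_request("ct/v1/get-entries",
    # {"start": "0", "end": "99"}) -> "ct/v1/get-entries?start=0&end=99"
    # (parameter order may vary, since it follows dict iteration order).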
def get(self, path, params=None):
d = self._agent.request("GET", self._make_request(path, params))
d.addCallback(self._response_cb)
return d
class EntryProducer(object):
"""A push producer for log entries."""
implements(iweb.IBodyProducer)
def __init__(self, handler, reactor, uri, start, end,
batch_size, entries_db=None):
self._handler = handler
self._reactor = reactor
self._uri = uri
self._entries_db = entries_db
self._consumer = None
assert 0 <= start <= end
self._start = start
self._end = end
self._current = self._start
self._batch_size = batch_size
self._batches = Queue()
self._currently_fetching = 0
self._currently_stored = 0
self._last_fetching = self._current
self._max_currently_fetching = (FLAGS.max_fetchers_in_parallel *
self._batch_size)
# Required attribute of the interface.
self.length = iweb.UNKNOWN_LENGTH
self.min_delay = FLAGS.get_entries_retry_delay
@property
def finished(self):
return self._current > self._end
def __fail(self, failure):
if not self._stopped:
self.stopProducing()
self._done.errback(failure)
@staticmethod
def _calculate_retry_delay(retries):
"""Calculates delay based on number of retries which already happened.
Random is there, so we won't attack server lots of requests exactly
at the same time, and 1.3 is nice constant for exponential back-off."""
return ((0.4 + random.uniform(0.3, 0.6)) * FLAGS.get_entries_retry_delay
* 1.4**retries)
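    # Illustrative values (not in the original source): with the default
    # get_entries_retry_delay of 1 second, successive retries wait roughly
    # 0.7-1.0s, then 1.0-1.4s, then 1.4-2.0s, growing by a factor of 1.4
    # per retry plus random jitter.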
def _response_eb(self, failure, first, last, retries):
"""Error back for HTTP errors"""
if not self._paused:
            # if it's not the last retry and the failure wasn't our fault, we retry
if (retries < FLAGS.get_entries_max_retries and
not failure.check(log_client.HTTPClientError)):
logging.info("Error (%s): %s" % (self._uri, failure))
logging.info("Retrying get-entries for range <%d, %d> retry: %d"
% (first, last, retries))
d = task.deferLater(self._reactor,
self._calculate_retry_delay(retries),
self._fetch_parsed_entries,
first, last)
d.addErrback(self._response_eb, first, last, retries + 1)
return d
else:
self.__fail(failure)
def _fetch_eb(self, failure):
"""Error back for errors after getting result of a request
(InvalidResponse)"""
self.__fail(failure)
def _write_pending(self):
d = defer.Deferred()
d.callback(None)
if self._pending:
self._current += len(self._pending)
self._currently_stored -= len(self._pending)
d = self._consumer.consume(self._pending)
self._pending = None
return d
def _batch_completed(self, result):
self._currently_fetching -= len(result)
self._currently_stored += len(result)
return result
def _store_batch(self, entry_batch, start_index):
assert self._entries_db
d = threads.deferToThread(self._entries_db.store_entries,
enumerate(entry_batch, start_index))
d.addCallback(lambda _: entry_batch)
return d
def _get_entries_from_db(self, first, last):
if FLAGS.persist_entries and self._entries_db:
d = threads.deferToThread(self._entries_db.scan_entries, first, last)
d.addCallbacks(lambda entries: list(entries))
d.addErrback(lambda fail: fail.trap(database.KeyError) and None)
return d
else:
d = defer.Deferred()
d.callback(None)
return d
def _fetch_parsed_entries(self, first, last):
# first check in database
d = self._get_entries_from_db(first, last)
d.addCallback(self._sub_fetch_parsed_entries, first, last)
return d
def _sub_fetch_parsed_entries(self, entries, first, last):
        # it's not the best idea to hit the server with many requests at exactly
        # the same time, so requests are sent after a slight delay.
if not entries:
request = task.deferLater(self._reactor,
self._calculate_retry_delay(0),
self._handler.get,
self._uri + "/" +
log_client._GET_ENTRIES_PATH,
params={"start": str(first),
"end": str(last)})
request.addCallback(log_client._parse_entries, last - first + 1)
if self._entries_db and FLAGS.persist_entries:
request.addCallback(self._store_batch, first)
entries = request
else:
deferred_entries = defer.Deferred()
deferred_entries.callback(entries)
entries = deferred_entries
return entries
def _create_next_request(self, first, last, entries, retries):
d = self._fetch_parsed_entries(first, last)
d.addErrback(self._response_eb, first, last, retries)
d.addCallback(lambda result: (entries + result, len(result)))
d.addCallback(self._fetch, first, last, retries)
return d
def _fetch(self, result, first, last, retries):
entries, last_fetched_entries_count = result
next_range_start = first + last_fetched_entries_count
if next_range_start > last:
return entries
return self._create_next_request(next_range_start, last,
entries, retries)
def _create_fetch_deferred(self, first, last, retries=0):
d = defer.Deferred()
d.addCallback(self._fetch, first, last, retries)
d.addCallback(self._batch_completed)
d.addErrback(self._fetch_eb)
d.callback(([], 0))
return d
@defer.deferredGenerator
def produce(self):
"""Produce entries."""
while not self._paused:
wfd = defer.waitForDeferred(self._write_pending())
yield wfd
wfd.getResult()
if self.finished:
self.finishProducing()
return
first = self._last_fetching
while (self._currently_fetching <= self._max_currently_fetching and
self._last_fetching <= self._end and
self._currently_stored <= FLAGS.entries_buffer):
last = min(self._last_fetching + self._batch_size - 1, self._end,
self._last_fetching + self._max_currently_fetching
- self._currently_fetching + 1)
self._batches.put(self._create_fetch_deferred(first, last))
self._currently_fetching += last - first + 1
first = last + 1
self._last_fetching = first
wfd = defer.waitForDeferred(self._batches.get())
# Pause here until the body of the response is available.
yield wfd
# The producer may have been paused while waiting for the response,
# or errored out upon receiving it: do not write the entries out
# until after the next self._paused check.
self._pending = wfd.getResult()
def startProducing(self, consumer):
"""Start producing entries.
The producer writes EntryResponse protos to the consumer in batches,
until all entries have been received, or an error occurs.
Args:
consumer: the consumer to write to.
Returns:
a deferred that fires when no more entries will be written.
            Upon success, this deferred fires with the number of produced entries,
            or None if production wasn't successful. Upon failure, this deferred
fires with the appropriate HTTPError.
Raises:
RuntimeError: consumer already registered.
"""
if self._consumer:
raise RuntimeError("Producer already has a consumer registered")
self._consumer = consumer
self._stopped = False
self._paused = True
self._pending = None
self._done = defer.Deferred()
# An IBodyProducer should start producing immediately, without waiting
# for an explicit resumeProducing() call.
task.deferLater(self._reactor, 0, self.resumeProducing)
return self._done
def pauseProducing(self):
self._paused = True
def resumeProducing(self):
if self._paused and not self._stopped:
self._paused = False
d = self.produce()
d.addErrback(self.finishProducing)
def stopProducing(self):
self._paused = True
self._stopped = True
def finishProducing(self, failure=None):
self.stopProducing()
if not failure:
self._done.callback(self._end - self._start + 1)
else:
self._done.errback(failure)
class AsyncLogClient(object):
"""A twisted log client."""
def __init__(self, agent, uri, entries_db=None, reactor=ireactor):
"""Initialize the client.
If entries_db is specified and flag persist_entries is true, get_entries
will return stored entries.
Args:
agent: the agent to use.
uri: the uri of the log.
            entries_db: object that conforms to the TempDB API.
reactor: the reactor to use. Default is twisted.internet.reactor.
"""
self._handler = AsyncRequestHandler(agent)
        # twisted expects bytes, so if uri is unicode we have to change the encoding
self._uri = uri.encode('ascii')
self._reactor = reactor
self._entries_db = entries_db
@property
def servername(self):
return self._uri
def get_sth(self):
"""Get the current Signed Tree Head.
Returns:
a Deferred that fires with a ct.proto.client_pb2.SthResponse proto.
Raises:
HTTPError, HTTPConnectionError, HTTPClientError,
HTTPResponseSizeExceededError, HTTPServerError: connection failed.
For logs that honour HTTP status codes, HTTPClientError (a 4xx)
should never happen.
InvalidResponseError: server response is invalid for the given
request.
"""
deferred_result = self._handler.get(self._uri + "/" +
log_client._GET_STH_PATH)
deferred_result.addCallback(log_client._parse_sth)
return deferred_result
def get_entries(self, start, end, batch_size=0):
"""Retrieve log entries.
Args:
start: index of first entry to retrieve.
end: index of last entry to retrieve.
batch_size: max number of entries to fetch in one go.
Returns:
an EntryProducer for the given range.
Raises:
InvalidRequestError: invalid request range (irrespective of log).
Caller is responsible for ensuring that (start, end) is a valid range
(by retrieving an STH first), otherwise a HTTPClientError may occur
during production.
"""
# Catch obvious mistakes here.
if start < 0 or end < 0 or start > end:
raise log_client.InvalidRequestError(
"Invalid range [%d, %d]" % (start, end))
batch_size = batch_size or FLAGS.entry_fetch_batch_size
return EntryProducer(self._handler, self._reactor, self._uri,
start, end, batch_size, self._entries_db)
def get_sth_consistency(self, old_size, new_size):
"""Retrieve a consistency proof.
Args:
old_size : size of older tree.
new_size : size of newer tree.
Returns:
a Deferred that fires with list of raw hashes (bytes) forming the
consistency proof
Raises:
HTTPError, HTTPClientError, HTTPServerError: connection failed,
or returned an error. HTTPClientError can happen when
(old_size, new_size) are not valid for this log (e.g. greater
than the size of the log).
InvalidRequestError: invalid request size (irrespective of log).
InvalidResponseError: server response is invalid for the given
request
Caller is responsible for ensuring that (old_size, new_size) are valid
(by retrieving an STH first), otherwise a HTTPClientError may occur.
"""
if old_size > new_size:
raise log_client.InvalidRequestError(
"old > new: %s >= %s" % (old_size, new_size))
if old_size < 0 or new_size < 0:
raise log_client.InvalidRequestError(
"both sizes must be >= 0: %s, %s" % (old_size, new_size))
# don't need to contact remote server for trivial proofs:
# - empty tree is consistent with everything
# - everything is consistent with itself
if old_size == 0 or old_size == new_size:
d = defer.Deferred()
d.callback([])
return d
deferred_response = self._handler.get(
self._uri + "/" +
log_client._GET_STH_CONSISTENCY_PATH,
params={"first": old_size, "second": new_size})
deferred_response.addCallback(log_client._parse_consistency_proof,
self.servername)
return deferred_response
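# -----------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module), assuming a twisted
# Agent, a running reactor and a reachable RFC 6962 log: how AsyncLogClient and
# EntryProducer are wired together. The ListConsumer below is hypothetical; the
# only contract the producer relies on is a consume() method returning a
# Deferred.
# -----------------------------------------------------------------------------
def _example_collect_entries(agent, log_uri):
    """Illustrative only: fetch entries 0..999 and collect them in a list."""
    class ListConsumer(object):
        def __init__(self):
            self.entries = []
        def consume(self, batch):
            # Each batch is a list of parsed entries handed over by produce().
            self.entries.extend(batch)
            return defer.succeed(None)
    consumer = ListConsumer()
    client = AsyncLogClient(agent, log_uri)
    producer = client.get_entries(0, 999)
    d = producer.startProducing(consumer)
    # The returned deferred fires with the number of produced entries; map it
    # to the collected list for convenience.
    d.addCallback(lambda _count: consumer.entries)
    return d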
|
lexibrent/certificate-transparency
|
python/ct/client/async_log_client.py
|
Python
|
apache-2.0
| 18,435 |
import just
from scrapy.spiders import Spider
class MySpider(Spider):
name = "github_spider"
allowed_domains = ["github.com"]
with open("../repos.txt") as f:
start_urls = [url.strip() for url in f.readlines()]
def parse(self, response):
base = "https://raw.githubusercontent.com"
content = response.text.encode("utf-8")
just.write(content, "data" + response.url[len(base):])
|
warenlg/shell-complete
|
shcomplete/repospider.py
|
Python
|
apache-2.0
| 428 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import urllib
import zlib
import zipfile
import math
import sys
import json
import time
import bz2
import gzip
import binascii
import requests
import random
from subprocess import *
import subprocess
import threading
import MySQLdb # See http://stackoverflow.com/questions/372885/how-do-i-connect-to-a-mysql-database-in-python
import MySQLdb.cursors
from multiprocessing import Process, Manager
# This script downloads the data from the highlander database, creates a json from it, then uploads it to
# the cgs system, which for now consists of an hbase database where the data will be saved.
# Why do we download the data from highlander instead of using the parser from dbBuilder directly?
# Because the current dbBuilder.tojson does not give as much information as we would like for the benchmarks, that's all.
# Configuration for the user
highlander_host = "highlander.usr.hydra.vub.ac.be"
highlander_host = "172.31.244.166"
highlander_database = "Iridia"
highlander_user = "iridia"
highlander_password = "iri.2742"
local_host = "127.0.0.1"
local_database = "highlander_chromosomes"
local_user = "root"
local_password = "Olgfe65grgr"
current_server_url = 'http://62.210.254.52'
cluster_url = 'http://insilicodb.ulb.ac.be:8888'
querySession = requests.Session()
info = {'username':'gdegols','password':'z9FNeTrQJYaemAtyUVva'}
r = querySession.post(cluster_url+'/accounts/login/',data=info)
target_database = "hbase" # "hbase" or "impala_text"
global_upload_state = False # If False, we download the data. If True, we upload the data previously downloaded.
# This function checks whether the given sample has already been uploaded to hbase
def isSampleDone(sample_name, current_upload_state):
if not os.path.isfile('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt'):
return False
samples = [line.strip() for line in open('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt')]
found = False
sample_name = str(sample_name)
for sample in samples:
if sample and sample_name == sample:
found = True
break
return found
def addSampleDone(sample, current_upload_state):
with open('cluster_'+target_database+'_samples_done_'+str(current_upload_state)+'.txt', 'a') as file:
file.write(str(sample)+'\r\n')
def fieldsToCheck():
with open('api.json', 'rb') as f:
fields = f.read()
fields = json.loads(fields)
# We create a list to keep/recreate the order
ordered_fields = []
for i in xrange(0,len(fields)):
ordered_fields.append(fields['c'+str(i)])
# Thanks to this code, the mapping will be 40% faster
new_fields = {}
for key in fields:
field = fields[key]
new_fields[field['highlander']] = field['json']
return new_fields, ordered_fields
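# A minimal sketch (not from the original repository) of the api.json shape that
# fieldsToCheck() assumes: one "c<i>" entry per column, each mapping a highlander
# column name to a json field name. The field names below are made up for
# illustration only.
_EXAMPLE_API_JSON = {
    "c0": {"highlander": "chr", "json": "variants.referenceName"},
    "c1": {"highlander": "pos", "json": "variants.start"},
}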
# This function returns the genotype (0|0, 1|1, 0|1, 1|0 only) from a "highlander variant"
def genotypeFromVariant(variant):
if variant['zygosity'] == 'Homozygous':
if random.random() < 0.5:
return '1|1'
else:
return '0|0'
else:
if random.random() < 0.5:
return '0|1'
else:
return '1|0'
# This function is in charge of creating an adapted json for the benchmarks
def tojsonForBenchmarks(variants, patient):
fields, ordered_fields = fieldsToCheck()
data = {}
i=0
for variant in variants:
data[i] = {}
# We try to match any data from highlander to json
for highlander_field in variant:
if highlander_field in fields:
data[i][fields[highlander_field]] = str(variant[highlander_field]).replace(';',',')
# Some specific information
data[i]['readGroupSets.readGroups.sampleId'] = patient # variant['project_id']
data[i]['variants.fileformat'] = 'VCFv4.1'
if variant['allelic_depth_ref'] and variant['allelic_depth_alt']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_ref'] + "," + variant['allelic_depth_alt']
elif variant['allelic_depth_ref']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_ref']
elif variant['allelic_depth_alt']:
data[i]['variants.calls.info.confidence_by_depth'] = variant['allelic_depth_alt']
data[i]['variants.info.insert_date'] = int(time.time())
data[i]['variants.calls.genotype'] = genotypeFromVariant(variant)
i += 1
return data
# This function is in charge of creating an adapted tsv for the benchmarks
def totsvForBenchmarks(variants, patient):
fields, ordered_fields = fieldsToCheck()
fields_map = {}
for field_id in xrange(0,len(ordered_fields)):
fields_map[ordered_fields[field_id]['highlander']] = field_id
"""
init_map = {}
for field_id in xrange(0,len(ordered_fields)):
init_map[ordered_fields[field_id]['highlander']] = ''
"""
tsv = []
dt = 0
for variant in variants:
# Some specific information
if variant['allelic_depth_ref'] and variant['allelic_depth_alt']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref'] + "," + variant['allelic_depth_alt']
elif variant['allelic_depth_ref']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_ref']
elif variant['allelic_depth_alt']:
variant['genotype_likelihood_hom_ref,genotype_likelihood_het,genotype_likelihood_hom_alt'] = variant['allelic_depth_alt']
variant['insert_date'] = int(time.time())
variant['special_genotype'] = genotypeFromVariant(variant)
variant['special_fileformat'] = 'VCFv4.1'
# We create the row-key
rowkey = str(variant['project_id']) + '-' + str(variant['chr']) + '-' \
+ str(variant['pos']) + '-' + str(variant['reference']) + '-' \
+ str(variant['alternative'])
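        # Illustrative row-key (values made up): "1234-1-884562-A-T", i.e.
        # project_id-chr-pos-reference-alternative.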
line = rowkey
        # It took me some time to find the most efficient way to create the tsv line, but
        # maybe there is another way to do that even faster... It takes 11.5s for the current loop
val = [''] * len(ordered_fields)
for field_name, field_place in fields_map.iteritems():
try:
if variant[field_name]:
if field_name != 'unisnp_ids' and field_name != 'dbsnp_id_141' and field_name != 'dbsnp_id_137':
val[field_place] = str(variant[field_name])
else:
val[field_place] = str(variant[field_name]).replace(';',',')
except:
pass
line += ';'.join(val)
""" 9s
j = 0
for field in ordered_fields:
try:
if variant[field['highlander']]:
j += 1
else:
j += 1
except:
j += 1
"""
""" 19s
for field in ordered_fields:
if field['highlander'] in variant and variant[field['highlander']]:
line += ';'+str(variant[field['highlander']]).replace(';',',')
else:
line += ';'
"""
""" 16s
current_map = init_map.copy()
for field, value in variant.iteritems():
if field != 'unisnp_ids':
current_map[field] = str(value)
else:
current_map[field] = str(value).replace(';',',') #.replace(';',',')
for field in ordered_fields:
line += ';'+current_map[field['highlander']]
"""
""" 16s
for field in ordered_fields:
try:
if variant[field['highlander']]:
if field['highlander'] == 'unisnp_ids':
line += ';'+str(variant[field['highlander']]).replace(';',',')
else:
line += ';'+str(variant[field['highlander']])
else:
line += ';'
except:
line += ';'
"""
tsv.append(line)
return '\n'.join(tsv)
# This function saves the current variants for later
def saveForLater(cur, patient, benchmark_table):
# We download the variants for the given patient
print(patient+": Downloading data."),
st = time.time()
cur.execute("SELECT * FROM "+benchmark_table+" WHERE patient = '"+patient+"' ORDER BY id") # 15s
print("Time: "+str(round(time.time()-st,2))+"s.")
if cur.rowcount < 40000:
print(patient+": Probably incomplete data found (rows = "+str(cur.rowcount)+" < 40 000), we stop here.")
return False
# We convert the variants to a json object
print(patient+": Converting data ("+str(cur.rowcount)+" lines)."),
st = time.time()
variants = tojsonForBenchmarks(cur.fetchall(), patient)
print("Time: "+str(round(time.time()-st,2))+"s.")
# For test only
# return testIfCompressWorthIt(variants)
# We save the file to the current web server
print(patient+": Saving compressed data. "),
server_directory = '/var/www/html/cgs-41gre4gre4htrhtrthtjhty'
st = time.time()
t = json.dumps(variants)
f = gzip.open(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt.gz', 'wb')
f.write(t)
f.close()
print("Time: "+str(round(time.time()-st,2))+"s.")
return True
def launchPatientToTSV(patient, benchmark_table, procnum, return_dict):
t = patientToTSV()
t.setBenchmarkTable(benchmark_table)
t.setPatient(patient)
res = t.launch()
return_dict[procnum] = res
class patientToTSV():
m_patient = None
m_benchmark_table = None
def setPatient(self, patient):
self.m_patient = patient
def setBenchmarkTable(self, benchmark_table):
self.m_benchmark_table = benchmark_table
def launch(self):
patient = self.m_patient
benchmark_table = self.m_benchmark_table
attempts = 0
connexion = None
while connexion is None:
try:
connexion = MySQLdb.connect(host= highlander_host, user=highlander_user, passwd=highlander_password,db=highlander_database, cursorclass=MySQLdb.cursors.DictCursor, compress=False)
cur = connexion.cursor()
except:
print(patient+": Failed to connect.")
time.sleep(1.0)
connexion = None
attempts += 1
if attempts > 3:
print(patient+": abort connexion.")
return ''
t = patient+": Downloading data. "
st = time.time()
cur.execute("SELECT * FROM "+benchmark_table+" WHERE patient = '"+patient+"' ORDER BY id") # 15s
print(t+ "Time: "+str(round(time.time()-st,2))+"s.")
if cur.rowcount < 40000:
print(patient+": Probably incomplete data found (rows = "+str(cur.rowcount)+" < 40 000), we stop here.")
return ''
# We convert the variants to a tsv text
t = patient+": Converting data ("+str(cur.rowcount)+" lines). "
st = time.time()
variants = totsvForBenchmarks(cur.fetchall(), patient)
print(t+"Time: "+str(round(time.time()-st,2))+"s.")
cur.close()
connexion.close()
# We return the text
return variants
# This function should only be used for benchmark purposes as we don't use json anymore
def saveToTSV(cur, sample, last_sample, benchmark_table):
print("Saving samples ["+str(sample)+";"+str(last_sample)+"[")
# We open the file
server_directory = '/var/www/html/cgs-41gre4gre4htrhtrthtjhty'
patient = 'NA'+(str(sample).zfill(5))
f = gzip.open(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.tsv.gz', 'wb')
processes = []
max_processes = 5
st_init = time.time()
manager = Manager()
return_dict = manager.dict()
for sample in xrange(sample, last_sample):
patient = 'NA'+(str(sample).zfill(5))
if len(processes) == 0:
st_init = time.time()
manager = Manager()
return_dict = manager.dict()
d = Process(name='launchPatientToTSV', target=launchPatientToTSV, args=(patient, benchmark_table, len(processes), return_dict))
d.daemon = True
d.start()
processes.append(d)
if len(processes) >= max_processes:
for d in processes:
d.join()
# We save the file to the current web server
print("Saving (compressed) tsv data for the different samples. "),
st = time.time()
for res in xrange(0, len(processes)):
f.write(return_dict[res])
print("Time: "+str(round(time.time()-st,2))+"s.")
print(str(max_processes)+" samples done in "+str(round(time.time()-st_init,2))+"s")
st_init = time.time()
processes = []
manager = Manager()
return_dict = manager.dict()
if len(processes) >= 1:
for t in processes:
t.join()
# We save the file to the current web server
print("Saving (compressed) tsv data for the different samples. "),
st = time.time()
for res in xrange(0, len(processes)):
f.write(return_dict[res])
print("Time: "+str(round(time.time()-st,2))+"s.")
print(str(max_processes)+" samples done in "+str(round(time.time()-st_init,2))+"s")
st_init = time.time()
processes = []
manager = Manager()
return_dict = manager.dict()
f.close()
return True
# This method is in charge of uploading a json of variants to hbase ZZEr4RfUy1ZWmri
# We don't use compression as it is not really necessary since this script is executed from a server: at least 10 MB/s, * 36000 = 350 GB/10h.
def uploadToHbase(patient, benchmark_table):
# We make a query to the cluster, asking him to download the file
info = {'database':database,'variants':current_server_url+'/cgs-41gre4gre4htrhtrthtjhty/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt','patient':patient}
upload_state = False
attempts = 0
while not upload_state is True:
r = querySession.get(cluster_url+'/variants/benchmarks/variant/import/'+benchmark_table,params=info)
# We check the content
try:
result = json.loads(r.text)
upload_state = True
except:
with open('logs/error_upload_'+str(patient)+'_'+database+'_'+benchmark_table+'.txt', 'w') as outfile:
outfile.write(r.text)
upload_state = False
if not upload_state is True or str(result['status']) != '0':
print(patient+" Problem while uploading data. Result saved in logs/error_upload_"+str(patient)+"_"+database+"_"+benchmark_table+".txt")
attempts += 1
if attempts >= 3:
os.remove(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt')
sys.exit('A problem occurred during the downloading... Please check your logs.')
upload_state = True
    # We save the result of the query to a log (especially the execution time, but I'm lazy so it will be the complete json directly)
with open('logs/success_upload_'+database+'_'+benchmark_table+'.txt', 'a') as outfile:
outfile.write(str(patient)+" : "+json.dumps(result)+"\n")
# We delete the file previously generated -> not needed anymore
# os.remove(server_directory+'/hbase_upload_'+str(patient)+'_'+benchmark_table+'.txt')
return True
# This method makes it easy to see whether it is worth it to compress the data
def testIfCompressWorthIt(variants):
st = time.time()
t = json.dumps(variants)
print("Json dump: "+str(time.time()-st)+"s ("+str(len(t)/1024)+"ko).")
# We save the uncompress text
st = time.time()
with open('/var/www/html/benchmarks/hbase_upload.txt', 'w') as outfile:
outfile.write(t)
#json.dump(variants, outfile, sort_keys = True, ensure_ascii=False)
print("Json write: "+str(time.time()-st)+"s.")
method = "gzip"
if method == "bz2": # -> not worth it, it takes around 45s to compress 65Mo (->1.6Mo which was great), huge cpu usage for only 1 core. We could try to parallelized the stuff by compressing different files simultaneously but it's boring.
# We save the compressed text
st = time.time()
compressed = bz2.compress(t)
print("Json compress: "+str(time.time()-st)+"s.")
st = time.time()
with open('/var/www/html/benchmarks/hbase_upload.txt.bzip2', 'w') as outfile:
outfile.write(compressed)
#outfile.write(binascii.hexlify(compressed))
#json.dump(variants, outfile, sort_keys = True, ensure_ascii=False)
print("Json write: "+str(time.time()-st)+"s ("+str(len(t)/1024)+"ko).")
st = time.time()
with open('/var/www/html/benchmarks/hbase_upload.txt.bzip2', 'rb') as infile:
compressedRead = infile.read()
print("Json read compressed: "+str(time.time()-st)+"s ("+str(len(compressedRead)/1024)+"ko).")
st = time.time()
decompressed = bz2.decompress(compressedRead)
print("Json decompress: "+str(time.time()-st)+"s ("+str(len(decompressed)/1024)+"ko).")
elif method == "gzip": # -> interesting, around 6s to compress 65Mo to 2.6Mo.
# We save the compressed text
st = time.time()
f = gzip.open('/var/www/html/benchmarks/hbase_upload.txt.gz', 'wb')
f.write(t)
f.close()
print("Json compress and write: "+str(time.time()-st)+"s ("+str(os.path.getsize('/var/www/html/benchmarks/hbase_upload.txt.gz')/1024)+"ko).")
st = time.time()
f = gzip.open('/var/www/html/benchmarks/hbase_upload.txt.gz', 'rb')
decompressed = f.read()
f.close()
print("Json read and decompress: "+str(time.time()-st)+"s ("+str(len(decompressed)/1024)+"ko).")
return True
if __name__ == '__main__':
# We connect to the db
#highlander_connexion = MySQLdb.connect(host= highlander_host, user=highlander_user, passwd=highlander_password,db=highlander_database, cursorclass=MySQLdb.cursors.DictCursor, compress=False)
cur = 0
# sudo ip add add dev tun0 172.31.236.177/24 broadcast 172.31.236.178
# We count the data available in each analysis
analyses = [('small', 200, '20_2015_04_01_benchmarks_small'), ('medium', 1000,'21_2015_04_01_benchmarks_medium'),('big',5000,'22_2015_04_01_benchmarks_big')]#,('huge',25000,'23_2015_04_01_benchmarks_huge')]
starting_sample = 100
for analysis in analyses:
# For each sample we will download the data, then create a json from it, and upload it to hbase
if global_upload_state is False:
increment = 5000
else:
increment = 1
for sample in xrange(starting_sample + 1, starting_sample + analysis[1], increment):
current_sample = 'NA'+(str(sample).zfill(5))
increment = max(1,min(increment, starting_sample + analysis[1] - sample))
if isSampleDone(current_sample, global_upload_state):
continue
if global_upload_state is False:
# We download the data from Highlander
#if saveForLater(cur, current_sample, analysis[0]):
if saveToTSV(cur, sample, sample+increment, analysis[0]):
addSampleDone(current_sample, False)
else:
break
continue
elif isSampleDone(current_sample, False):
# If we are in the upload state, we upload the data if it was previously downloaded
print(current_sample+": Uploading data."),
st = time.time()
if uploadToHbase(current_sample, analysis[0]):
                    addSampleDone(current_sample, True)
print("Time: "+str(round(time.time()-st,2))+"s.")
else:
print("Time: "+str(round(time.time()-st,2))+"s.")
print(current_sample+": Uploading data -> Failed. ")
else:
print(current_sample+": variants not previously downloaded.")
continue
starting_sample += analysis[1] + 1
print("The end!")
# We close the connexion
#highlander_connexion.close()
|
jpoullet2000/cgs-benchmarks
|
hbase-benchmarks/hbase_import_process.py
|
Python
|
apache-2.0
| 21,976 |
#
# Licensed to Intel Corporation under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Intel Corporation licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_gateway import is_instance_of
from py4j.java_collections import ListConverter, JavaArray, JavaList
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
from pyspark.mllib.common import callJavaFunc
from pyspark import SparkConf
if sys.version >= '3':
long = int
unicode = str
class JavaValue(object):
def jvm_class_constructor(self):
name = "create" + self.__class__.__name__
print("creating: " + name)
return name
def __init__(self, bigdl_type="float", *args):
self.value = callBigDlFunc(bigdl_type, self.jvm_class_constructor(),
*args)
self.bigdl_type = bigdl_type
class InferenceResult():
def __init__(self, result, count, method):
self.result = result
self.count = count
self.method = method
def __reduce__(self):
return (InferenceResult, (self.result, self.count, self.method))
def __str__(self):
return "result: %s, count: %s, method: %s" % (
self.result, self.count, self.method)
class PySample(object):
def __init__(self, features, label, features_shape, label_shape,
bigdl_type="float"):
self.features = features
self.label = label
self.features_shape = features_shape
self.label_shape = label_shape
self.bigdl_type = bigdl_type
# features is a ndarray
# label is a ndarray
@classmethod
def from_ndarray(cls, features, label, bigdl_type="float"):
return cls(
features=[float(i) for i in features.ravel()],
label=[float(i) for i in label.ravel()],
features_shape=list(features.shape),
label_shape=list(label.shape) if label.shape else [label.size],
bigdl_type=bigdl_type)
@classmethod
def of(cls, features, label, features_shape, bigdl_type="float"):
return cls(
features=[float(i) for i in features],
label=[float(label)],
features_shape=features_shape,
label_shape=[1],
bigdl_type=bigdl_type)
def __reduce__(self):
return (PySample, (
self.features, self.label, self.features_shape, self.label_shape,
self.bigdl_type))
def __str__(self):
return "features: %s, label: %s," \
"features_shape: %s, label_shape: %s, bigdl_type: %s" % (
self.features, self.label, self.features_shape,
self.label_shape,
self.bigdl_type)
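# A minimal sketch (not part of the original file) showing how a PySample is
# typically built from numpy arrays before being shipped to the JVM side.
# numpy is assumed to be available; the shapes and values are arbitrary.
def _example_pysample():
    import numpy as np
    features = np.arange(6.0).reshape(2, 3)
    label = np.array([1.0])
    # from_ndarray flattens both arrays and records their shapes separately.
    return PySample.from_ndarray(features, label)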
_picklable_classes = [
'LinkedList',
'SparseVector',
'DenseVector',
'DenseMatrix',
'Rating',
'LabeledPoint',
'PySample',
'TestResult'
]
def initEngine(nodeNum, coreNum, bigdl_type="float"):
callBigDlFunc(bigdl_type, "initEngine", nodeNum, coreNum)
def calc_spark_conf(coreNum, nodeNum):
print("coreNum:%s, nodeNum: %s" % (coreNum, nodeNum))
sparkConf = SparkConf()
sparkConf.setExecutorEnv("DL_ENGINE_TYPE", "mklblas")
sparkConf.setExecutorEnv("MKL_DISABLE_FAST_MM", "1")
sparkConf.setExecutorEnv("KMP_BLOCKTIME", "0")
sparkConf.setExecutorEnv("OMP_WAIT_POLICY", "passive")
sparkConf.setExecutorEnv("OMP_NUM_THREADS", "1")
sparkConf.setExecutorEnv("DL_CORE_NUMBER", str(coreNum))
sparkConf.setExecutorEnv("DL_NODE_NUMBER", str(nodeNum))
sparkConf.set("spark.shuffle.blockTransferService", "nio")
sparkConf.set("spark.scheduler.minRegisteredResourcesRatio", "1.0")
return sparkConf
def callBigDlFunc(bigdl_type, name, *args):
""" Call API in PythonBigDLAPI """
sc = SparkContext.getOrCreate()
if bigdl_type == "float":
api = getattr(
sc._jvm.com.intel.analytics.bigdl.python.api.PythonBigDLAPI.ofFloat(),
name)
elif bigdl_type == "double":
api = getattr(
sc._jvm.com.intel.analytics.bigdl.python.api.PythonBigDLAPI.ofDouble(),
name)
else:
raise Exception("Not supported bigdl_type: %s" % bigdl_type)
return callJavaFunc(sc, api, *args)
def _java2py(sc, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = sc._jvm.SerDe.javaToPython(r)
return RDD(jrdd, sc)
if clsName == 'DataFrame':
return DataFrame(r, SQLContext.getOrCreate(sc))
if is_instance_of(sc._gateway, r,
"com.intel.analytics.bigdl.nn.Container"):
from optim.optimizer import Model
return Model.of(r)
if clsName in _picklable_classes:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList)):
try:
r = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.dumps(
r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
result = func(*args)
return _java2py(sc, result)
def _to_java_object_rdd(rdd):
""" Return a JavaRDD of Object by unpickling
    It will convert each Python object into a Java object by Pyrolite, whether
    the RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return \
rdd.ctx._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.pythonToJava(
rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, (list, tuple)):
obj = ListConverter().convert([_py2java(sc, x) for x in obj],
sc._gateway._gateway_client)
elif isinstance(obj, JavaValue):
obj = obj.value
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.bigdl.api.python.BigDLSerDe.loads(data)
return obj
|
zhichao-li/BigDL
|
dl/src/main/python/util/common.py
|
Python
|
apache-2.0
| 7,567 |
"""Support for Ecovacs Ecovacs Vaccums."""
from __future__ import annotations
import logging
import sucks
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import ECOVACS_DEVICES
_LOGGER = logging.getLogger(__name__)
SUPPORT_ECOVACS = (
SUPPORT_BATTERY
| SUPPORT_RETURN_HOME
| SUPPORT_CLEAN_SPOT
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_LOCATE
| SUPPORT_STATUS
| SUPPORT_SEND_COMMAND
| SUPPORT_FAN_SPEED
)
ATTR_ERROR = "error"
ATTR_COMPONENT_PREFIX = "component_"
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Ecovacs vacuums."""
vacuums = []
for device in hass.data[ECOVACS_DEVICES]:
vacuums.append(EcovacsVacuum(device))
_LOGGER.debug("Adding Ecovacs Vacuums to Home Assistant: %s", vacuums)
add_entities(vacuums, True)
class EcovacsVacuum(VacuumEntity):
"""Ecovacs Vacuums such as Deebot."""
def __init__(self, device):
"""Initialize the Ecovacs Vacuum."""
self.device = device
self.device.connect_and_wait_until_ready()
if self.device.vacuum.get("nick") is not None:
self._name = str(self.device.vacuum["nick"])
else:
# In case there is no nickname defined, use the device id
self._name = str(format(self.device.vacuum["did"]))
self._fan_speed = None
self._error = None
_LOGGER.debug("Vacuum initialized: %s", self.name)
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
self.device.statusEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.batteryEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.lifespanEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.errorEvents.subscribe(self.on_error)
def on_error(self, error):
"""Handle an error event from the robot.
This will not change the entity's state. If the error caused the state
to change, that will come through as a separate on_status event
"""
if error == "no_error":
self._error = None
else:
self._error = error
self.hass.bus.fire(
"ecovacs_error", {"entity_id": self.entity_id, "error": error}
)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self.device.vacuum.get("did")
@property
def is_on(self):
"""Return true if vacuum is currently cleaning."""
return self.device.is_cleaning
@property
def is_charging(self):
"""Return true if vacuum is currently charging."""
return self.device.is_charging
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ECOVACS
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return self.device.vacuum_status
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
self.device.run(sucks.Charge())
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
return icon_for_battery_level(
battery_level=self.battery_level, charging=self.is_charging
)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.device.battery_status is not None:
return self.device.battery_status * 100
return super().battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return self.device.fan_speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [sucks.FAN_SPEED_NORMAL, sucks.FAN_SPEED_HIGH]
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
self.device.run(sucks.Clean())
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
self.return_to_base()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
self.device.run(sucks.Stop())
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
self.device.run(sucks.Spot())
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
self.device.run(sucks.PlaySound())
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.is_on:
self.device.run(sucks.Clean(mode=self.device.clean_status, speed=fan_speed))
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
self.device.run(sucks.VacBotCommand(command, params))
@property
def extra_state_attributes(self):
"""Return the device-specific state attributes of this vacuum."""
data = {}
data[ATTR_ERROR] = self._error
for key, val in self.device.components.items():
attr_name = ATTR_COMPONENT_PREFIX + key
data[attr_name] = int(val * 100)
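        # Illustrative result (values made up): {"error": None, "component_main_brush": 87}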
return data
|
mezz64/home-assistant
|
homeassistant/components/ecovacs/vacuum.py
|
Python
|
apache-2.0
| 6,203 |
# -*- coding: utf-8 -*-
# Main kivy import
import kivy
# Additional kivy imports
from kivy.app import App
from kivy.config import Config
# Screens
from screens import screenmanager
from screens.mainscreen import MainScreen
from screens.ingamescreen import IngameScreen
# Cause program to end if the required kivy version is not installed
kivy.require('1.8.0')
__author__ = 'ohaz'
# ---------------
# Config settings
# ---------------
# Multitouch emulation creates red dots on the screen. We don't need multitouch, so we disable it
Config.set('input', 'mouse', 'mouse,disable_multitouch')
# ---------------------
# Local initialisations
# ---------------------
# Initialise the screen manager (screens/screenmanager.py)
screenmanager.init()
# Add the two screens to it
screenmanager.set_screens([MainScreen(name='main_menu'), IngameScreen(name='ingame')])
# Start with the main menu screen
screenmanager.change_to('main_menu')
class ColoursApp(App):
"""
The main Class.
Only needed for the build function.
"""
def build(self):
"""
Method to build the app.
:return: the screenmanager
"""
return screenmanager.get_sm()
# Create the app and run it
if __name__ == '__main__':
ColoursApp().run()
|
ohaz/Colours
|
main.py
|
Python
|
apache-2.0
| 1,269 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import unittest
from earo.event import Event, Field
from earo.handler import Handler, Emittion, NoEmittion
from earo.mediator import Mediator
from earo.context import Context
from earo.processor import Processor, ProcessFlow
from earo.diagram import Diagram
class TestDiagram(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_active_process_flow(self):
mediator = Mediator()
processor = Processor('.+')
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
class EventE(Event):
event_e_field = Field(list, [3, 8, 7]);
def fooA_BC(context, event):
import time
time.sleep(0.5)
return (Emittion(EventB()), NoEmittion(EventC, 'Test No Emmittion EventC'))
def fooA(context, event):
pass
def fooB_D(context, event):
return Emittion(EventD())
def fooC(context, event):
pass
def fooD(context, event):
1 / 0
handler_1 = Handler(EventA, fooA_BC, [EventB, EventC])
handler_2 = Handler(EventA, fooA)
handler_3 = Handler(EventB, fooB_D, [EventD])
handler_4 = Handler(EventC, fooC)
handler_5 = Handler(EventD, fooD)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
context = Context(mediator, EventA(), processor)
context.process()
process_flow = context.process_flow
diagram = Diagram(process_flow=process_flow)
self.assertIsNotNone(diagram.json)
def test_inactive_process_flow(self):
mediator = Mediator()
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
def fooBC(context, event):
return (Emittion(EventB()), Emittion(EventC()))
def fooD(context, event):
return Emittion(EventD())
def foo(context, event):
pass
def fooEx(context, event):
1 / 0
handler_1 = Handler(EventA, fooBC, [EventB, EventC])
handler_2 = Handler(EventA, foo)
handler_3 = Handler(EventB, fooD, [EventD])
handler_4 = Handler(EventC, foo)
handler_5 = Handler(EventD, fooEx)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
process_flow = ProcessFlow(mediator, EventA)
diagram = Diagram(process_flow=process_flow)
self.assertIsNotNone(diagram.json)
def test_json(self):
mediator = Mediator()
class EventA(Event):
event_a_field = Field(int, 100);
class EventB(Event):
event_b_field = Field(str, 'hello');
class EventC(Event):
event_c_field = Field(float, 1.1);
class EventD(Event):
event_d_field = Field(dict, {'x': 3, 'y': 4});
def fooBC(context, event):
return (Emittion(EventB()), Emittion(EventC()))
def fooD(context, event):
return Emittion(EventD())
def foo(context, event):
pass
def fooEx(context, event):
1 / 0
handler_1 = Handler(EventA, fooBC, [EventB, EventC])
handler_2 = Handler(EventA, foo)
handler_3 = Handler(EventB, fooD, [EventD])
handler_4 = Handler(EventC, foo)
handler_5 = Handler(EventD, fooEx)
mediator.register_event_handler(
handler_1,
handler_2,
handler_3,
handler_4,
handler_5
)
process_flow = ProcessFlow(mediator, EventA)
diagram_from_process_flow = Diagram(process_flow=process_flow)
json = diagram_from_process_flow.json
diagram_from_json = Diagram(json=json)
self.assertIsNotNone(diagram_from_json.json)
if __name__ == '__main__':
unittest.main()
|
Everley1993/Laky-Earo
|
test/test_diagram.py
|
Python
|
apache-2.0
| 4,567 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from warehouse.admin.services import ISponsorLogoStorage
from warehouse.utils.static import ManifestCacheBuster
def includeme(config):
sponsorlogos_storage_class = config.maybe_dotted(
config.registry.settings["sponsorlogos.backend"]
)
config.register_service_factory(
sponsorlogos_storage_class.create_service, ISponsorLogoStorage
)
# Setup Jinja2 Rendering for the Admin application
config.add_jinja2_search_path("templates", name=".html")
# Setup our static assets
prevent_http_cache = config.get_settings().get("pyramid.prevent_http_cache", False)
config.add_static_view(
"admin/static",
"warehouse.admin:static/dist",
# Don't cache at all if prevent_http_cache is true, else we'll cache
# the files for 10 years.
cache_max_age=0 if prevent_http_cache else 10 * 365 * 24 * 60 * 60,
)
config.add_cache_buster(
"warehouse.admin:static/dist/",
ManifestCacheBuster(
"warehouse.admin:static/dist/manifest.json",
reload=config.registry.settings["pyramid.reload_assets"],
strict=not prevent_http_cache,
),
)
config.whitenoise_add_files("warehouse.admin:static/dist/", prefix="/admin/static/")
config.whitenoise_add_manifest(
"warehouse.admin:static/dist/manifest.json", prefix="/admin/static/"
)
# Add our routes
config.include(".routes")
# Add our flags
config.include(".flags")
|
pypa/warehouse
|
warehouse/admin/__init__.py
|
Python
|
apache-2.0
| 2,024 |
from gui_items import *
from objects.xml.xml_step import Step
class ActionPanel:
def __init__(self, display):
self.steps = []
self.display = display
        # new ACTION panel (textbox with the turns, button for executing the turns)
self.panel = self.display.gui_items.add_panel(450, 390, (300, 300))
cube_action_gui_items = GuiItems(display, self.display.cube_gui, self.panel)
main_sizer = cube_action_gui_items.gen_box_sizer(wx.HORIZONTAL)
axes_sizer = cube_action_gui_items.gen_box_sizer(wx.VERTICAL)
output_sizer = cube_action_gui_items.gen_box_sizer(wx.VERTICAL)
        # button to execute the turns
cube_action_button = cube_action_gui_items.gen_button("Run actions.", 20, 20)
cube_action_button.btn_id = 'run'
cube_action_button.Bind(wx.EVT_BUTTON, lambda event: self._button_run())
# reset textbox button
cube_reset_textbox_button = cube_action_gui_items.gen_button("Reset actions.", 30, 30)
cube_reset_textbox_button.Bind(wx.EVT_BUTTON, lambda event: self._button_reset())
        # textbox with the turns
self.cube_action_textbox = cube_action_gui_items.gen_textbox(10, 10, (200, -1), (wx.TE_MULTILINE))
# dropdown for selecting cube row
combo_box_items = []
for size in range(self.display._storage.cube_size):
combo_box_items.append(str(size+1))
self.action_combo_box = cube_action_gui_items.gen_combobox((150, 10), (150, -1), combo_box_items)
self.action_combo_box.SetSelection(0)
# turnable checkbox(clockwise, counterclockwise)
cube_turnable_checkbox = cube_action_gui_items.gen_radiobox(20, 20, (100, 100), wx.RA_SPECIFY_ROWS,
['CounterClockwise', 'Clockwise'])
        # buttons for the turns (WITH BIND)
x_button = cube_action_gui_items.gen_button("Voer X in", 0, 0)
x_button.btn_id = 'x'
y_button = cube_action_gui_items.gen_button("Voer Y in", 0, 0)
y_button.btn_id = 'y'
z_button = cube_action_gui_items.gen_button("Voer Z in", 0, 0)
z_button.btn_id = 'z'
x_button.Bind(wx.EVT_BUTTON, lambda event: self._button_x_y_z('x', self.action_combo_box.GetValue(),
cube_turnable_checkbox.GetSelection()))
y_button.Bind(wx.EVT_BUTTON, lambda event: self._button_x_y_z('y', self.action_combo_box.GetValue(),
cube_turnable_checkbox.GetSelection()))
z_button.Bind(wx.EVT_BUTTON, lambda event: self._button_x_y_z('z', self.action_combo_box.GetValue(),
cube_turnable_checkbox.GetSelection()))
# undo button
undo_button = cube_action_gui_items.gen_button("Undo last input", 0,0)
undo_button.Bind(wx.EVT_BUTTON, self.__undo)
# add elements to box_sizers
output_sizer.Add(self.cube_action_textbox, 0, wx.ALL, 5)
output_sizer.Add(cube_action_button, 0, wx.ALL, 5)
output_sizer.Add(cube_reset_textbox_button, 0, wx.ALL, 5)
output_sizer.Add(undo_button, 0, wx.ALL, 5)
axes_sizer.Add(x_button, 0, wx.ALL, 1)
axes_sizer.Add(y_button, 0, wx.ALL, 1)
axes_sizer.Add(z_button, 0, wx.ALL, 1)
axes_sizer.Add(self.action_combo_box, 0, wx.ALL, 1)
axes_sizer.Add(cube_turnable_checkbox, 0, wx.ALL, 1)
main_sizer.Add(output_sizer)
main_sizer.Add(axes_sizer)
# set sizer to panel
self.panel.SetSizer(main_sizer)
self.panel.Layout()
# hide panel
self.panel.Hide()
def __undo(self, event):
counter = 1
textbox_items = ""
splitted_inserted_text = self.cube_action_textbox.GetValue().split(';')
for current_split in splitted_inserted_text:
if counter < len(splitted_inserted_text):
textbox_items += ";" + current_split
counter += 1
# change textbox value
self.cube_action_textbox.Clear()
self.cube_action_textbox.AppendText(textbox_items[1:]) # minus first ; char
def _button_run(self):
self.read_steps()
self.display._storage.current_cube.execute_steps(self.steps)
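    # The textbox holds one step per "axis,row,direction" triple separated by ';',
    # e.g. "x,1,1;y,2,-1" (example values only); read_steps() converts the row to
    # a zero-based index.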
def read_steps(self):
self.steps = []
text = str(self.cube_action_textbox.GetValue())
if not text == "":
for current_split in text.split(';'):
var_split = current_split.split(',')
self.steps.append(Step(var_split[0], int(var_split[1])-1, int(var_split[2])))
print var_split
def _button_reset(self):
self.steps = []
self.cube_action_textbox.Clear()
def _reset_textbox(self):
self.cube_action_textbox.Clear()
for step in self.steps:
self.cube_action_textbox.AppendText(";" + str(step.axis) + "," + str(step.rows) + "," + str(step.direction))
def _button_x_y_z(self, axis, row, direction):
if direction == 0:
direction = -1
if len(self.cube_action_textbox.GetValue()) == 0:
self.cube_action_textbox.AppendText(str(axis) + "," + str(row) + "," + str(direction))
else:
self.cube_action_textbox.AppendText(";" + str(axis) + "," + str(row) + "," + str(direction))
|
Willempie/Artificial_Intelligence_Cube
|
logic/handling/panel_action.py
|
Python
|
apache-2.0
| 5,455 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pwnlib
def handle_pow(r):
print(r.recvuntil(b'python3 '))
print(r.recvuntil(b' solve '))
challenge = r.recvline().decode('ascii').strip()
p = pwnlib.tubes.process.process(['kctf_bypass_pow', challenge])
solution = p.readall().strip()
r.sendline(solution)
print(r.recvuntil(b'Correct\n'))
r = pwnlib.tubes.remote.remote('127.0.0.1', 1337)
print(r.recvuntil('== proof-of-work: '))
if r.recvline().startswith(b'enabled'):
handle_pow(r)
print(r.recvuntil(b'CTF{'))
print(r.recvuntil(b'}'))
exit(0)
|
google/google-ctf
|
2021/quals/kctf/challenge-templates/pwn/healthcheck/healthcheck.py
|
Python
|
apache-2.0
| 1,160 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-03-03
Last_modify: 2016-03-03
******************************************
'''
'''
Reverse a linked list from position m to n.
Do it in-place and in one-pass.
For example:
Given 1->2->3->4->5->NULL, m = 2 and n = 4,
return 1->4->3->2->5->NULL.
Note:
Given m, n satisfy the following condition:
1 ≤ m ≤ n ≤ length of list.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseBetween(self, head, m, n):
"""
:type head: ListNode
:type m: int
:type n: int
:rtype: ListNode
"""
dummy = pre = ListNode(0)
dummy.next = head
for i in range(m-1):
pre = pre.next
start = pre.next
then = start.next
for i in range(n - m):
start.next = then.next
then.next = pre.next
pre.next = then
then = start.next
return dummy.next
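# A small self-check sketch (not part of the original solution): build
# 1->2->3->4->5, reverse positions 2..4, and print the result.
def _build(values):
    dummy = tail = ListNode(0)
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def _to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

if __name__ == "__main__":
    result = Solution().reverseBetween(_build([1, 2, 3, 4, 5]), 2, 4)
    print(_to_list(result))  # expected: [1, 4, 3, 2, 5]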
|
zhlinh/leetcode
|
0092.Reverse Linked List II/solution.py
|
Python
|
apache-2.0
| 1,199 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.logs_api import LogsApi
class TestLogsApi(unittest.TestCase):
""" LogsApi unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.logs_api.LogsApi()
def tearDown(self):
pass
def test_log_file_handler(self):
"""
Test case for log_file_handler
"""
pass
def test_log_file_list_handler(self):
"""
Test case for log_file_list_handler
"""
pass
if __name__ == '__main__':
unittest.main()
|
skuda/client-python
|
kubernetes/test/test_logs_api.py
|
Python
|
apache-2.0
| 964 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest import mock
from airflow.executors.local_executor import LocalExecutor
from airflow.utils.state import State
class TestLocalExecutor(unittest.TestCase):
TEST_SUCCESS_COMMANDS = 5
def execution_parallelism(self, parallelism=0):
executor = LocalExecutor(parallelism=parallelism)
executor.start()
success_key = 'success {}'
success_command = ['true', 'some_parameter']
fail_command = ['false', 'some_parameter']
self.assertTrue(executor.result_queue.empty())
execution_date = datetime.datetime.now()
for i in range(self.TEST_SUCCESS_COMMANDS):
key_id, command = success_key.format(i), success_command
key = key_id, 'fake_ti', execution_date, 0
executor.running.add(key)
executor.execute_async(key=key, command=command)
fail_key = 'fail', 'fake_ti', execution_date, 0
executor.running.add(fail_key)
executor.execute_async(key=fail_key, command=fail_command)
executor.end()
# By that time Queues are already shutdown so we cannot check if they are empty
self.assertEqual(len(executor.running), 0)
for i in range(self.TEST_SUCCESS_COMMANDS):
key_id = success_key.format(i)
key = key_id, 'fake_ti', execution_date, 0
self.assertEqual(executor.event_buffer[key], State.SUCCESS)
self.assertEqual(executor.event_buffer[fail_key], State.FAILED)
expected = self.TEST_SUCCESS_COMMANDS + 1 if parallelism == 0 else parallelism
self.assertEqual(executor.workers_used, expected)
def test_execution_unlimited_parallelism(self):
self.execution_parallelism(parallelism=0)
def test_execution_limited_parallelism(self):
test_parallelism = 2
self.execution_parallelism(parallelism=test_parallelism)
@mock.patch('airflow.executors.local_executor.LocalExecutor.sync')
@mock.patch('airflow.executors.base_executor.BaseExecutor.trigger_tasks')
@mock.patch('airflow.stats.Stats.gauge')
def test_gauge_executor_metrics(self, mock_stats_gauge, mock_trigger_tasks, mock_sync):
executor = LocalExecutor()
executor.heartbeat()
calls = [mock.call('executor.open_slots', mock.ANY),
mock.call('executor.queued_tasks', mock.ANY),
mock.call('executor.running_tasks', mock.ANY)]
mock_stats_gauge.assert_has_calls(calls)
if __name__ == '__main__':
unittest.main()
|
Fokko/incubator-airflow
|
tests/executors/test_local_executor.py
|
Python
|
apache-2.0
| 3,328 |
from selenium.webdriver.common.by import By
from SeleniumPythonFramework.src.main.Pages.CommonPage import CommonPage
# Production locations
TRY_TEXT = {"by": By.ID, "locator": "url-input"}
TRY_BUTTON = {"by": By.ID, "locator": "get-data"}
PATH = ""
class HomePage(CommonPage):
def __init__(self, **kwargs):
super(HomePage, self).__init__(page_url=PATH, **kwargs)
def try_url_text(self):
return self.get_element(TRY_TEXT)
def try_url_button(self):
return self.get_element(TRY_BUTTON)
def try_url(self, url):
self.try_url_text().send_keys(url)
try_button = self.try_url_button()
with self.wait_for_page_load:
try_button.click()
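# Illustrative usage (editor's sketch, not from the original file). The exact
# keyword arguments accepted by CommonPage are an assumption here:
#
#     from selenium import webdriver
#     driver = webdriver.Chrome()
#     home = HomePage(driver=driver)      # kwargs are forwarded to CommonPage
#     home.try_url("http://example.com")  # type the URL and click the button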
|
GinoGalotti/python-selenium-utils
|
SeleniumPythonFramework/src/main/Pages/HomePage.py
|
Python
|
apache-2.0
| 710 |
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit ([email protected]), April 2014
from collections import namedtuple
from math import exp
from ..traitlets import Float, Int, Instance, AfgenTrait
from ..decorators import prepare_rates, prepare_states
from ..base_classes import ParamTemplate, StatesTemplate, SimulationObject,\
VariableKiosk
from .. import exceptions as exc
from warnings import warn
# Template for namedtuple containing partitioning factors
class PartioningFactors(namedtuple("partitioning_factors", "FR FL FS FO")):
pass
class DVS_Partitioning(SimulationObject):
"""Class for assimilate partioning based on development stage (`DVS`).
`DVS_partioning` calculates the partitioning of the assimilates to roots,
stems, leaves and storage organs using fixed partitioning tables as a
function of crop development stage. The available assimilates are first
    split into below-ground and aboveground using the values in FRTB. In a
second stage they are split into leaves (`FLTB`), stems (`FSTB`) and storage
organs (`FOTB`).
Since the partitioning fractions are derived from the state variable `DVS`
they are regarded state variables as well.
**Simulation parameters** (To be provided in cropdata dictionary):
======= ============================================= ======= ============
Name Description Type Unit
======= ============================================= ======= ============
FRTB Partitioning to roots as a function of TCr -
development stage.
FSTB Partitioning to stems as a function of TCr -
development stage.
FLTB Partitioning to leaves as a function of TCr -
development stage.
FOTB Partitioning to storage organs as a function TCr -
of development stage.
======= ============================================= ======= ============
**State variables**
======= ================================================= ==== ============
Name Description Pbl Unit
======= ================================================= ==== ============
FR Fraction partitioned to roots. Y -
FS Fraction partitioned to stems. Y -
FL Fraction partitioned to leaves. Y -
    FO Fraction partitioned to storage organs Y -
======= ================================================= ==== ============
**Rate variables**
None
    **Signals sent or handled**
None
**External dependencies:**
======= =================================== ================= ============
Name Description Provided by Unit
======= =================================== ================= ============
DVS Crop development stage DVS_Phenology -
======= =================================== ================= ============
*Exceptions raised*
A PartitioningError is raised if the partitioning coefficients to leaves,
stems and storage organs on a given day do not add up to '1'.
"""
class Parameters(ParamTemplate):
FRTB = AfgenTrait()
FLTB = AfgenTrait()
FSTB = AfgenTrait()
FOTB = AfgenTrait()
class StateVariables(StatesTemplate):
FR = Float(-99.)
FL = Float(-99.)
FS = Float(-99.)
FO = Float(-99.)
PF = Instance(PartioningFactors)
def initialize(self, day, kiosk, parvalues):
"""
:param day: start date of the simulation
:param kiosk: variable kiosk of this PCSE instance
:param parvalues: `ParameterProvider` object providing parameters as
key/value pairs
"""
self.params = self.Parameters(parvalues)
self.kiosk = kiosk
# initial partitioning factors (pf)
DVS = self.kiosk["DVS"]
FR = self.params.FRTB(DVS)
FL = self.params.FLTB(DVS)
FS = self.params.FSTB(DVS)
FO = self.params.FOTB(DVS)
# Pack partitioning factors into tuple
PF = PartioningFactors(FR, FL, FS, FO)
# Initial states
self.states = self.StateVariables(kiosk, publish=["FR","FL","FS","FO"],
FR=FR, FL=FL, FS=FS, FO=FO, PF=PF)
self._check_partitioning()
def _check_partitioning(self):
"""Check for partitioning errors."""
FR = self.states.FR
FL = self.states.FL
FS = self.states.FS
FO = self.states.FO
checksum = FR+(FL+FS+FO)*(1.-FR) - 1.
if abs(checksum) >= 0.0001:
msg = ("Error in partitioning!\n")
msg += ("Checksum: %f, FR: %5.3f, FL: %5.3f, FS: %5.3f, FO: %5.3f\n" \
% (checksum, FR, FL, FS, FO))
self.logger.error(msg)
warn(msg)
# raise exc.PartitioningError(msg)
@prepare_states
def integrate(self, day, delt=1.0):
"""Update partitioning factors based on development stage (DVS)"""
params = self.params
DVS = self.kiosk["DVS"]
self.states.FR = params.FRTB(DVS)
self.states.FL = params.FLTB(DVS)
self.states.FS = params.FSTB(DVS)
self.states.FO = params.FOTB(DVS)
# Pack partitioning factors into tuple
self.states.PF = PartioningFactors(self.states.FR, self.states.FL,
self.states.FS, self.states.FO)
self._check_partitioning()
def calc_rates(self, day, drv):
""" Return partitioning factors based on current DVS.
"""
        # rate calculation does nothing for partitioning as it is a derived
# state
return self.states.PF
class DVS_Partitioning_NPK(SimulationObject):
"""Class for assimilate partitioning based on development stage (`DVS`)
with influence of NPK stress.
`DVS_Partitioning_NPK` calculates the partitioning of the assimilates to roots,
stems, leaves and storage organs using fixed partitioning tables as a
    function of crop development stage. The only difference from the normal
partitioning class is the effect of nitrogen stress on partitioning to
leaves (parameter NPART). The available assimilates are first
split into below-ground and aboveground using the values in FRTB. In a
second stage they are split into leaves (`FLTB`), stems (`FSTB`) and storage
organs (`FOTB`).
Since the partitioning fractions are derived from the state variable `DVS`
they are regarded state variables as well.
**Simulation parameters** (To be provided in cropdata dictionary):
======= ============================================= ======= ============
Name Description Type Unit
======= ============================================= ======= ============
FRTB Partitioning to roots as a function of TCr -
development stage.
FSTB Partitioning to stems as a function of TCr -
development stage.
FLTB Partitioning to leaves as a function of TCr -
development stage.
    FOTB Partitioning to storage organs as a function TCr -
of development stage.
NPART Coefficient for the effect of N stress on SCR -
leaf biomass allocation
======= ============================================= ======= ============
**State variables**
======= ================================================= ==== ============
Name Description Pbl Unit
======= ================================================= ==== ============
FR Fraction partitioned to roots. Y -
FS Fraction partitioned to stems. Y -
FL Fraction partitioned to leaves. Y -
    FO Fraction partitioned to storage organs Y -
======= ================================================= ==== ============
**Rate variables**
None
    **Signals sent or handled**
None
**External dependencies:**
======= =================================== ================= ============
Name Description Provided by Unit
======= =================================== ================= ============
DVS Crop development stage DVS_Phenology -
TRA Actual transpiration Simple_Evapotranspiration mm d-1
TRAMX Maximum transpiration Simple_Evapotranspiration mm d-1
NNI Nitrogen nutrition index npk_dynamics -
======= =================================== ================= ============
*Exceptions raised*
A PartitioningError is raised if the partitioning coefficients to leaves,
stems and storage organs on a given day do not add up to '1'.
"""
class Parameters(ParamTemplate):
FRTB = AfgenTrait()
FLTB = AfgenTrait()
FSTB = AfgenTrait()
FOTB = AfgenTrait()
NPART = Float(-99.) # coefficient for the effect of N stress on leaf allocation
class StateVariables(StatesTemplate):
FR = Float(-99.)
FL = Float(-99.)
FS = Float(-99.)
FO = Float(-99.)
PF = Instance(PartioningFactors)
def initialize(self, day, kiosk, cropdata):
"""
:param day: start date of the simulation
:param kiosk: variable kiosk of this PCSE instance
:param cropdata: dictionary with WOFOST cropdata key/value pairs
"""
self.params = self.Parameters(cropdata)
self.kiosk = kiosk
        # initial partitioning factors (pf)
DVS = self.kiosk["DVS"]
FR = self.params.FRTB(DVS)
FL = self.params.FLTB(DVS)
FS = self.params.FSTB(DVS)
FO = self.params.FOTB(DVS)
# Pack partitioning factors into tuple
PF = PartioningFactors(FR, FL, FS, FO)
# Initial states
self.states = self.StateVariables(kiosk, publish=["FR","FL","FS","FO"],
FR=FR, FL=FL, FS=FS, FO=FO, PF=PF)
self._check_partitioning()
def _check_partitioning(self):
"""Check for partitioning errors."""
FR = self.states.FR
FL = self.states.FL
FS = self.states.FS
FO = self.states.FO
checksum = FR+(FL+FS+FO)*(1.-FR) - 1.
if abs(checksum) >= 0.0001:
msg = ("Error in partitioning!\n")
msg += ("Checksum: %f, FR: %5.3f, FL: %5.3f, FS: %5.3f, FO: %5.3f\n" \
% (checksum, FR, FL, FS, FO))
self.logger.error(msg)
raise exc.PartitioningError(msg)
@prepare_states
def integrate(self, day, delt=1.0):
"""
Update partitioning factors based on development stage (DVS)
and the Nitrogen nutrition Index (NNI)
"""
params = self.params
states = self.states
DVS = self.kiosk["DVS"]
TRA = self.kiosk["TRA"]
TRAMX = self.kiosk["TRAMX"]
NNI = self.kiosk["NNI"]
TRANRF = TRA/TRAMX
if TRANRF < NNI:
# Water stress is more severe than nitrogen stress and the
# partitioning follows the original LINTUL2 assumptions
# Note: we use specifically nitrogen stress not nutrient stress!!!
FRTMOD = max( 1., 1./(TRANRF+0.5))
states.FR = min(0.6, params.FRTB(DVS) * FRTMOD)
states.FL = params.FLTB(DVS)
states.FS = params.FSTB(DVS)
states.FO = params.FOTB(DVS)
else:
# Nitrogen stress is more severe than water stress resulting in
# less partitioning to leaves and more to stems
FLVMOD = exp(-params.NPART * (1.0-NNI))
states.FL = params.FLTB(DVS) * FLVMOD
states.FS = params.FSTB(DVS) + params.FLTB(DVS) - states.FL
states.FR = params.FRTB(DVS)
states.FO = params.FOTB(DVS)
# Pack partitioning factors into tuple
states.PF = PartioningFactors(states.FR, states.FL,
states.FS, states.FO)
self._check_partitioning()
def calc_rates(self, day, drv):
""" Return partitioning factors based on current DVS.
"""
        # rate calculation does nothing for partitioning as it is a derived
# state
return self.states.PF
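# Worked example of the checksum used by _check_partitioning() above
# (illustrative numbers, not taken from any crop parameter file): with
# FR = 0.4 partitioned below ground and above-ground fractions FL = 0.6,
# FS = 0.3, FO = 0.1 (summing to 1.0),
#     checksum = FR + (FL + FS + FO) * (1. - FR) - 1.
#              = 0.4 + 1.0 * 0.6 - 1.0 = 0.0
# so the partitioning is accepted. If FL + FS + FO drifts away from 1, the
# checksum becomes non-zero, the check logs an error, and the NPK variant
# additionally raises PartitioningError.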
|
jajberni/pcse_web
|
main/pcse/crop/partitioning.py
|
Python
|
apache-2.0
| 12,949 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Run selected tests from test_pep380 from StdLib
##
import unittest
import sys
from iptest import run_test
import test.test_pep380
def load_tests(loader, standard_tests, pattern):
if sys.implementation.name == 'ironpython':
suite = unittest.TestSuite()
suite.addTest(test.test_pep380.TestPEP380Operation('test_attempted_yield_from_loop'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_attempting_to_send_to_non_generator'))
suite.addTest(unittest.expectedFailure(test.test_pep380.TestPEP380Operation('test_broken_getattr_handling'))) # TODO: figure out
suite.addTest(unittest.expectedFailure(test.test_pep380.TestPEP380Operation('test_catching_exception_from_subgen_and_returning'))) # TODO: figure out
suite.addTest(test.test_pep380.TestPEP380Operation('test_close_with_cleared_frame'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_conversion_of_sendNone_to_next'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_custom_iterator_return'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegating_close'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegating_generators_claim_to_be_running'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegating_throw'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegating_throw_to_non_generator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegation_of_close_to_non_generator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegation_of_initial_next_to_subgenerator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegation_of_next_call_to_subgenerator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegation_of_next_to_non_generator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegation_of_send'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_delegator_is_visible_to_debugger'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_exception_in_initial_next_call'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_exception_value_crash'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_generator_return_value'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_handing_exception_while_delegating_close'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_handling_exception_while_delegating_send'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_next_and_return_with_value'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_raising_exception_in_delegated_next_call'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_raising_exception_in_initial_next_call'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_returning_value_from_delegated_throw'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_send_and_return_with_value'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_send_tuple_with_custom_generator'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_throwing_GeneratorExit_into_subgen_that_raises'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_throwing_GeneratorExit_into_subgen_that_returns'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_throwing_GeneratorExit_into_subgenerator_that_yields'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_value_attribute_of_StopIteration_exception'))
suite.addTest(test.test_pep380.TestPEP380Operation('test_yield_from_empty'))
return suite
else:
return loader.loadTestsFromModule(test.test_pep380, pattern)
run_test(__name__)
|
IronLanguages/ironpython3
|
Tests/test_pep380_stdlib.py
|
Python
|
apache-2.0
| 4,035 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import optparse
import os
import shutil
import sys
import tempfile
import xml.dom.minidom
from xml2po import Main # noqa
from xml2po.modes.docbook import docbookXmlMode # noqa
class myDocbookXmlMode(docbookXmlMode):
def __init__(self):
self.lists = ['itemizedlist', 'orderedlist', 'variablelist',
'segmentedlist', 'simplelist', 'calloutlist',
'varlistentry', 'userinput', 'computeroutput',
'prompt', 'command', 'screen']
self.objects = ['figure', 'textobject', 'imageobject', 'mediaobject',
'screenshot', 'literallayout', 'programlisting',
'option']
default_mode = 'docbook'
operation = 'merge'
xml_options = {
'mark_untranslated': False,
'expand_entities': True,
'expand_all_entities': False,
}
IGNORE_FOLDER = []
IGNORE_FILE = []
def mergeback(folder, language, root):
"""Generate translated files for language in directory folder."""
if folder is None:
path = root
else:
outputFiles = mergeSingleDocument(folder, language, root)
if (outputFiles is not None) and (len(outputFiles) > 0):
for outXML in outputFiles:
changeXMLLangSetting(outXML, language)
return
if not os.path.isdir(path):
return
files = os.listdir(path)
for aFile in files:
if not (aFile in IGNORE_FOLDER):
outputFiles = mergeSingleDocument(aFile, language, root)
if (outputFiles is not None) and (len(outputFiles) > 0):
for outXML in outputFiles:
changeXMLLangSetting(outXML, language)
def mergeSingleDocument(folder, language, root):
xmlfiles = []
outputfiles = []
abspath = os.path.join(root, folder)
if os.path.isdir(abspath):
os.path.walk(abspath, get_xml_list, xmlfiles)
else:
return None
if len(xmlfiles) > 0:
popath = os.path.join(abspath, "locale", language + ".po")
# generate MO file
mofile_handler, mofile_tmppath = tempfile.mkstemp()
os.close(mofile_handler)
os.system("msgfmt -o %s %s" % (mofile_tmppath, popath))
for aXML in xmlfiles:
# (filename, ext) = os.path.splitext(os.path.basename(aXML))
relpath = os.path.relpath(aXML, root)
outputpath = os.path.join(os.path.curdir, "generated", language,
relpath)
try:
xml2po_main = Main(default_mode, "merge", outputpath,
xml_options)
xml2po_main.current_mode = myDocbookXmlMode()
xml2po_main.merge(mofile_tmppath, aXML)
outputfiles.append(outputpath)
except IOError:
print("Error: cannot open aFile %s for writing.")
sys.exit(5)
except Exception:
print("Exception happen")
if mofile_tmppath:
os.remove(mofile_tmppath)
return outputfiles
def changeXMLLangSetting(xmlFile, language):
"""Update XML settings for file."""
    # The mergeback breaks the ENTITY title which should look like:
# <!DOCTYPE chapter [
# <!ENTITY % openstack SYSTEM "../common/entities/openstack.ent">
# %openstack;
# ]>
# The "%openstack;" gets removed, let's add it back first.
# NOTE(jaegerandi): This just handles the openstack ENTITY, if
# others are used, this needs to be generalized.
with open(xmlFile) as xml_file:
newxml = xml_file.read()
# Used in openstack-manuals:
newxml = newxml.replace(
'common/entities/openstack.ent">',
'common/entities/openstack.ent"> %openstack;')
# As used in security-doc and operations-guide
newxml = newxml.replace('SYSTEM "openstack.ent">',
'SYSTEM "openstack.ent"> %openstack;')
try:
dom = xml.dom.minidom.parseString(newxml)
except xml.parsers.expat.ExpatError as e:
print("Error: parsing of file '%s' for language '%s' "
"with Expat failed: %s." % (xmlFile, language, e))
sys.exit(5)
root = dom.documentElement
root.setAttribute("xml:lang", language[:2])
fileObj = codecs.open(xmlFile, "wb", encoding="utf-8")
nodelists = root.getElementsByTagName("link")
for node in nodelists:
if node.hasAttribute("href"):
node.setAttribute("xlink:href", node.getAttribute("href"))
if node.hasAttribute("title"):
node.setAttribute("xlink:title", node.getAttribute("title"))
dom.writexml(fileObj)
def get_xml_list(sms, dr, flst):
if (flst == "target") or (flst == "wadls"):
return
if dr.find("target") > -1:
return
if dr.find("wadls") > -1:
return
for f in flst:
if (f.endswith(".xml") and (f != "pom.xml") and
not (f in IGNORE_FILE)):
sms.append(os.path.join(dr, f))
def get_default_book(root):
return os.listdir(root)[0]
def generatedocbook():
global IGNORE_FOLDER, IGNORE_FILE
usage = "usage: %prog [options] command [cmd_options]"
    description = ("This is the tool to generate translated docbooks, which "
                   "will be stored in 'generated/[language]/'")
IGNORE_FOLDER = ["docbkx-example"]
IGNORE_FILE = []
parser = optparse.OptionParser(
usage=usage, version="0.6", description=description
)
parser.disable_interspersed_args()
parser.add_option(
"-l", "--language", dest="language", help=("specified language")
)
parser.add_option(
"-b", "--book", dest="book",
help=("specified docbook")
)
parser.add_option(
"-r", "--root", dest="root", default="./doc",
help=("root directory")
)
(options, args) = parser.parse_args()
if options.language is None:
print("must specify language")
return
root = options.root
if options.book is None:
options.book = get_default_book(root)
# change working directory
# copy folders
folder = options.book
language = options.language
root = options.root
sourcepath = os.path.join(root, folder)
destpath = os.path.join(os.path.curdir, "generated", language)
if not os.path.exists(destpath):
os.makedirs(destpath)
destfolder = os.path.join(destpath, folder)
if os.path.exists(destfolder):
shutil.rmtree(destfolder)
os.system("cp -r %s %s" % (sourcepath, destpath))
mergeback(folder, language, root)
def generatePoT(folder, root):
if folder is None:
path = root
else:
generateSinglePoT(folder, root)
return
if not os.path.isdir(path):
return
files = os.listdir(path)
for aFile in files:
if not (aFile in IGNORE_FOLDER):
generateSinglePoT(aFile, root)
def generateSinglePoT(folder, root):
xmlfiles = []
abspath = os.path.join(root, folder)
if os.path.isdir(abspath):
os.path.walk(abspath, get_xml_list, xmlfiles)
else:
return
if len(xmlfiles) > 0:
output = os.path.join(abspath, "locale")
if not os.path.exists(output):
os.mkdir(output)
output = os.path.join(output, folder + ".pot")
try:
xml2po_main = Main(default_mode, "pot", output, xml_options)
xml2po_main.current_mode = myDocbookXmlMode()
except IOError:
print("Error: cannot open aFile %s for writing." % (output))
sys.exit(5)
# print(xmlfiles)
# print(">>>outout: %s ", output)
xml2po_main.to_pot(xmlfiles)
def generatepot():
global IGNORE_FOLDER, IGNORE_FILE
IGNORE_FOLDER = ["docbkx-example", "training-guide"]
IGNORE_FILE = ["api-examples.xml"]
try:
folder = sys.argv[1]
except Exception:
folder = None
try:
root = sys.argv[2]
except Exception:
root = "./doc"
generatePoT(folder, root)
|
savinash47/openstack-doc-tools
|
os_doc_tools/handle_pot.py
|
Python
|
apache-2.0
| 8,610 |
import modules.options_helper as opt_helper
from modules.file.file_helper import File
import sys
def main(options):
# available config keys
options_registry = ["path","find","replace_with"]
# verify config option provided match registry
opt_helper.check_options(options, options_registry)
path = options.get("path", False)
find = options.get("find", False)
replace_with = options.get("replace_with", False)
# see if all required fields are present
if path and find and replace_with:
f = File(path)
is_find_in_file = f.is_in_file(find)
filetype = f.get_ftype()
# only supporting files right now, no links, directories
if filetype == "file" and is_find_in_file:
# check if the change was applied already to avoid replacing duplicate lines if any
if f.is_in_file(replace_with) and is_find_in_file:
print "Will not replace. Looks like following is already in file " + path + ": " + replace_with
else:
print "Replacing content in file: " + path
f.replace_in_file(find, replace_with)
else:
if filetype != "file":
print "Can't run this playbook because provided 'path' is not a file, it's a " + filetype
# TODO: raise exception
sys.exit()
if not is_find_in_file:
print "Didn't find " + find + " in the file " + path + ". Nothing to replace."
if __name__ == '__main__':
main(options)
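# Illustrative invocation (editor's sketch; the values are made up). The dict
# mirrors the keys declared in options_registry above:
#
#     main({"path": "/etc/example.conf",
#           "find": "old_value",
#           "replace_with": "new_value"})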
|
dkoudlo/py-manage-server
|
modules/file/replace.py
|
Python
|
apache-2.0
| 1,593 |
from blinky.appveyor import *
from blinky.circle import *
from blinky.github import *
from blinky.jenkins import *
from blinky.travis import *
http_port = 8080
model.title = "Test CI"
# Components
proton_c = Component(model, "Proton C")
# Environments
multiple = Environment(model, "Multiple OSes")
# Agents
github = GitHubAgent(model, "GitHub")
# Categories
client_tests = Category(model, "Clients", "client")
# Groups
group = Group(model, client_tests, "Proton C")
# To look up GitHub Actions workflow IDs:
# curl https://api.github.com/repos/apache/qpid-proton/actions/workflows
GitHubJob (model, group, proton_c, multiple, github, None, "apache/qpid-proton", "master", "Build", 2012003)
|
ssorj/blinky
|
misc/config.py
|
Python
|
apache-2.0
| 754 |
__author__ = "Rajiv Mayani"
import logging
import uuid
from decimal import Decimal
from enum import Enum
from flask.json import JSONEncoder
from sqlalchemy import inspect
from Pegasus.service.base import ErrorResponse, OrderedDict, PagedResponse
log = logging.getLogger(__name__)
class PegasusJsonEncoder(JSONEncoder):
"""JSON Encoder for Pegasus Service API Resources."""
def default(self, o):
"""."""
if isinstance(o, uuid.UUID):
return str(o)
elif isinstance(o, Decimal):
return float(o)
elif isinstance(o, Enum):
return o.name
elif isinstance(o, PagedResponse):
json_record = OrderedDict([("records", o.records)])
if o.total_records or o.total_filtered:
meta = OrderedDict()
if o.total_records is not None:
meta["records_total"] = o.total_records
if o.total_filtered is not None:
meta["records_filtered"] = o.total_filtered
json_record["_meta"] = meta
return json_record
elif isinstance(o, ErrorResponse):
json_record = OrderedDict([("code", o.code), ("message", o.message)])
if o.errors:
json_record["errors"] = [{"field": f, "errors": e} for f, e in o.errors]
return json_record
elif hasattr(o, "__json__"):
return o.__json__()
elif hasattr(o, "__table__"):
unloaded = inspect(o).unloaded
_v = {
k: getattr(o, k)
for k in o.__mapper__.column_attrs.keys()
if k not in unloaded
}
for k in getattr(o, "__includes__", {}):
_v[k] = getattr(o, k)
for k in getattr(o, "__excludes__", {}):
del _v[k]
return _v
return JSONEncoder.default(self, o)
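# Minimal usage sketch (editor's addition). With the pre-2.2 Flask JSON API
# that this module imports, a custom encoder is typically installed on the
# application object; the app below is hypothetical:
#
#     from flask import Flask, jsonify
#     app = Flask(__name__)
#     app.json_encoder = PegasusJsonEncoder
#
# jsonify() will then serialize UUID, Decimal, Enum, PagedResponse and
# ErrorResponse values using the rules implemented in default() above.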
|
pegasus-isi/pegasus
|
packages/pegasus-python/src/Pegasus/service/_encoder.py
|
Python
|
apache-2.0
| 1,941 |
"""a collection of Annotation-related models"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from flask_appbuilder import Model
from sqlalchemy import (
Column, DateTime, ForeignKey, Index, Integer, String, Text,
)
from sqlalchemy.orm import relationship
from superset.models.helpers import AuditMixinNullable
class AnnotationLayer(Model, AuditMixinNullable):
"""A logical namespace for a set of annotations"""
__tablename__ = 'annotation_layer'
id = Column(Integer, primary_key=True)
name = Column(String(250))
descr = Column(Text)
def __repr__(self):
return self.name
class Annotation(Model, AuditMixinNullable):
"""Time-related annotation"""
__tablename__ = 'annotation'
id = Column(Integer, primary_key=True)
start_dttm = Column(DateTime)
end_dttm = Column(DateTime)
layer_id = Column(Integer, ForeignKey('annotation_layer.id'))
short_descr = Column(String(500))
long_descr = Column(Text)
layer = relationship(
AnnotationLayer,
backref='annotation')
__table_args__ = (
Index('ti_dag_state', layer_id, start_dttm, end_dttm),
)
@property
def data(self):
return {
'start_dttm': self.start_dttm,
'end_dttm': self.end_dttm,
'short_descr': self.short_descr,
'long_descr': self.long_descr,
'layer': self.layer.name if self.layer else None,
}
|
alanmcruickshank/superset-dev
|
superset/models/annotations.py
|
Python
|
apache-2.0
| 1,544 |
class Sample(object):
"""
A data point of the Metric
:param metricId: Metric FQN
:type metricId: string
:param timestamp: Timestamp for the sample
:type timestamp: int
    :param val: Value of the sample
    :type val: float
:param min: Minimum of the sample
:type min: float
:param max: Maximum of the sample
:type max: float
:param avg: Average of the sample
:type avg: float
:param sum: Sum of the sample
:type sum: float
:param cnt: Count of the sample
:type cnt: float
"""
def __init__(self,
metricId,
timestamp,
val=None,
min=None,
max=None,
avg=None,
sum=None,
cnt=None):
self.metricId = metricId
self.timestamp = timestamp
if val is not None:
self.val = val
if max is not None:
self.max = max
if avg is not None:
self.avg = avg
if cnt is not None:
self.cnt = cnt
if min is not None:
self.min = min
if sum is not None:
self.sum = sum
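# Illustrative construction (editor's addition; the values are made up):
#
#     s = Sample(metricId='cpu.idle', timestamp=1461111111, val=97.5,
#                min=95.0, max=99.0, avg=97.2, sum=972.0, cnt=10)
#
# Only the statistics passed as non-None are set as attributes, so optional
# fields are simply absent from the sample when not supplied.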
|
Netuitive/netuitive-client-python
|
netuitive/sample.py
|
Python
|
apache-2.0
| 1,267 |
#! /usr/bin/python
"""Src-depend is a simple tool for sketching source code dependency graphs
from source code itself. It iterates through all source code files in given
directory, finds import statements and turns them into edges of a dependency
graph.
Uses graphviz for sketching graphs."""
import argparse
import graphviz
import logging
import os.path
import re
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--lang', dest='lang', default='python',
help='specifies language plugin to be used (defaults to python)')
parser.add_argument('-o', '--output', dest='img_out',
        help='output sketched graph to specified file (appends extension automatically); source will be output to IMG_OUT')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug mode')
parser.add_argument('-f', '--output-format', dest='format', default='png',
        help='specifies output image\'s format (defaults to .png)')
parser.add_argument('-r', '--remove-redundant', dest='remove-redundant',
action='store_true', help='remove direct dependencies on modules that module depends on indirectly')
parser.add_argument('-e', '--exclude', dest='excludes', nargs='+', default=[],
help='a filename to ommit (multiple names possible)')
parser.add_argument('--exclude-regex', dest='exclude-regex', default=None,
help='filenames matching specified regex will be ignored')
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='quiet mode')
parser.add_argument('target', help='source code directory to analyse')
return parser.parse_args().__dict__
def main(args):
log_level = logging.INFO
if args['debug']:
log_level = logging.DEBUG
elif args['quiet']:
log_level = logging.ERROR
logging.basicConfig(
level=log_level,
format='[%(asctime)s; %(levelname)s]: %(message)s'
)
is_excluded = exclude_checker(args['excludes'], args['exclude-regex'])
try:
import_obj = __import__('plugins.{}'.format(args['lang']))
plugin = getattr(import_obj, args['lang'])
except ImportError:
logging.error('Could not find plugin for {}!'.format(args['lang']))
return 1
files = find_source_files(args['target'], plugin.Module.filename_ext, is_excluded)
for f in files:
with open(f, 'r') as file:
plugin.Module(file, args['target'])
plugin.Module.create_dependency_tree()
if args['remove-redundant']:
plugin.Module.remove_redundant_dependencies()
graph = make_graph(*plugin.Module.registry)
graph.format = args['format']
if not args['img_out'] is None:
output = graph.render(args['img_out'])
logging.info('Writing graph image to {}...'.format(output))
def make_graph(*modules):
graph = graphviz.Digraph()
for module in modules:
graph.node(module.filename, module.name, module.attributes)
logging.debug('Creating node {}...'.format(module.name))
for dep in module.dependencies:
if not dep is None:
logging.debug('Creating dependency of {} on {}'.format(
module.name, dep.name
))
graph.edge(module.filename, dep.filename)
return graph
def find_source_files(path, ext, is_excluded):
basename = os.path.basename(path)
if is_excluded(basename):
        logging.debug('Omitting excluded path: {}...'.format(path))
elif not basename == '.' and basename.startswith('.'):
        logging.debug('Omitting hidden path: {}...'.format(path))
elif os.path.isfile(path) and path.endswith(ext):
        logging.info('{} recognized as source file.'.format(path))
yield path
elif os.path.isdir(path):
logging.debug('In dir "{}": {}'.format(path, os.listdir(path)))
for f in os.listdir(path):
for el in find_source_files(os.path.join(path, f), ext, is_excluded):
yield el
else:
logging.debug('{} is not a source file.'.format(path))
def exclude_checker(excluded, regex):
if regex is None:
return lambda filename: filename in excluded
else:
compiled_regex = re.compile(regex)
return lambda filename:filename in excluded \
or compiled_regex.match(filename)
if __name__ == '__main__':
exit(main(parseargs()))
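# Example invocation (editor's sketch; the paths are made up):
#
#     python depend.py -l python -f svg -o deps --remove-redundant ./src
#
# analyses ./src with the python plugin, drops redundant direct edges and
# renders the dependency graph to deps.svg via graphviz.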
|
Sventimir/src-depend
|
depend.py
|
Python
|
apache-2.0
| 4,469 |
from setuptools import setup, find_packages
setup(
name = 'jper-oaipmh',
version = '1.0.0',
packages = find_packages(),
install_requires = [
"octopus==1.0.0",
"esprit",
"Flask"
],
url = 'http://cottagelabs.com/',
author = 'Cottage Labs',
author_email = '[email protected]',
description = 'OAI-PMH endpoint for JPER',
classifiers = [
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
JiscPER/jper-oaipmh
|
setup.py
|
Python
|
apache-2.0
| 610 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import time
import sys
import subprocess
import hashlib
import base64
import re
import glob
import TestUtils
import socket
from TestUtils import TestUtilsMixin
log = logging.getLogger('test.auto')
def globbase(root, name):
return glob.glob(os.path.join(root, name))[0]
def globa(name):
return globbase(TestUtils.ACCUMULO_HOME, name)
class MapReduceTest(TestUtilsMixin,unittest.TestCase):
"""The test is used to test the functionality of a map reduce job on accumulo
Here are the steps of this test
1.Create a file called mapred_ftest_input with x number of lines with 1 value per line
2.Put file on Hadoop
3.Run Map Reduce Test that hashes the lines in the input (MD5) and puts each hash on its own row
4.Generate Hashes on the same input in test
5.Read table and compare hashes. Fail if they do not match
    6.Delete mapred_ftest_input from hadoop
"""
order = 21
tablename = "mapredf"
input_cfcq = "cf-HASHTYPE:cq-NOTHASHED"
output_cfcq = "cf-HASHTYPE:cq-MD5BASE64"
example_class_to_run ="org.apache.accumulo.examples.simple.mapreduce.RowHash"
def setUp(self):
if not os.getenv("ZOOKEEPER_HOME"):
self.fail("ZOOKEEPER_HOME environment variable is not set please set the location of ZOOKEEPER home in this environment variable")
return
TestUtilsMixin.setUp(self)
def tearDown(self):
TestUtilsMixin.tearDown(self)
def runTest(self):
        # These environment variables are needed to run this test; it will fail if they are not set in the environment
thriftjar = globa(os.path.join('lib','libthrift.jar'))
examples = globa(os.path.join('lib','accumulo-examples-simple.jar'))
core = globa(os.path.join('lib','accumulo-core.jar'))
fate = globa(os.path.join('lib','accumulo-fate.jar'))
start = globa(os.path.join('lib','accumulo-start.jar'))
jcommander = globa(os.path.join('lib','jcommander.jar'))
trace = globa(os.path.join('lib','accumulo-trace.jar'))
zkjar = globbase(os.getenv("ZOOKEEPER_HOME"),"zookeeper*[!javadoc|src|bin].jar")
self.createInputTableInAccumulo();
#Arguments for the Example Class
arg_list = ['-i', TestUtils.INSTANCE_NAME,
'-z', TestUtils.ZOOKEEPERS,
'-u', TestUtils.ROOT,
'-p', TestUtils.ROOT_PASSWORD,
'-t', self.tablename,
'--column', self.input_cfcq]
#MapReduce class to run
mapred_class= [self.accumulo_sh(),self.example_class_to_run]
#classes needed to run the mapreduce
libjars = ["-libjars",",".join([zkjar,thriftjar,examples,core,fate,trace,jcommander])]
cmd = mapred_class+libjars+arg_list
if(self.isAccumuloRunning()):
log.debug("COMMAND:"+str(cmd))
handle = self.runOn(self.masterHost(), cmd)
out, err = handle.communicate()
log.debug(out)
log.debug(err)
log.debug("Return code: "+str(handle.returncode))
log.debug("\n\n!!!FINISHED!!!\n\n")
if(handle.returncode==0):
self.checkResults()
else:
self.fail("Test did not finish")
def isAccumuloRunning(self):
output = subprocess.Popen(["jps","-m"],stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
if(output.find("tserver")!=-1 and output.find("master")!=-1):
return True
return False
def retrieveValues(self,tablename,cfcq):
input = "table %s\nscan\n" % tablename
out,err,code = self.rootShell(self.masterHost(),input)
#print out
restr1 = "[0-9].*\[\] (.*)"
restr2 = "[0-9] %s \[\] (.*)"%(cfcq)
val_list = re.findall(restr2,out)
return val_list
def checkResults(self):
control_values = [base64.b64encode(hashlib.md5("row%s"%(i)).digest()) for i in range(10)]
experiment_values = self.retrieveValues(self.tablename, self.output_cfcq)
        self.failIf(len(control_values) != len(experiment_values), "Lists aren't the same length")
diff=[ev for ev in experiment_values if ev not in control_values]
        self.failIf(len(diff)>0, "Start and MapReduced values aren't the same")
def fakeMRResults(self):
vals = self.retrieveValues(self.tablename, self.input_cfcq)
values = ["insert %s %s %s\n" % (i,self.output_cfcq.replace(":"," "),base64.b64encode(hashlib.md5("row%s" % i).digest())) for i in range(10,20)]
input = "table %s\n" % (self.tablename,)+"".join(values)
out,err,code = self.rootShell(self.masterHost(),input)
#print "FAKE",out
def createInputTableInAccumulo(self):
#my leet python list comprehensions skills in action
values = ["insert %s %s row%s\n" % (i,self.input_cfcq.replace(":"," "),i) for i in range(10)]
input = "createtable %s\ntable %s\n" % (self.tablename,self.tablename) + \
"".join(values)
out,err,code = self.rootShell(self.masterHost(),input)
#print "CREATE",out
def suite():
result = unittest.TestSuite()
result.addTest(MapReduceTest())
return result
|
wjsl/jaredcumulo
|
test/system/auto/simple/mapreduce.py
|
Python
|
apache-2.0
| 6,108 |
# Copyright (c) 2017 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Remove AIM DB tables' dipendencies
Revision ID: baccabeffa81
Revises: de3ed29972f1
Create Date: 2016-07-07 15:29:38.013141
"""
# revision identifiers, used by Alembic.
revision = 'baccabeffa81'
down_revision = 'de3ed29972f1'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
FK = 'foreignkey'
with op.batch_alter_table('aim_bridge_domains') as batch_op:
batch_op.drop_constraint('fk_bd_tn', type_=FK)
with op.batch_alter_table('aim_subnets') as batch_op:
batch_op.drop_constraint('fk_bd', type_=FK)
with op.batch_alter_table('aim_vrfs') as batch_op:
batch_op.drop_constraint('fk_vrf_tn', type_=FK)
with op.batch_alter_table('aim_app_profiles') as batch_op:
batch_op.drop_constraint('fk_ap_tn', type_=FK)
with op.batch_alter_table('aim_endpoint_groups') as batch_op:
batch_op.drop_constraint('fk_app_profile', type_=FK)
with op.batch_alter_table('aim_filters') as batch_op:
batch_op.drop_constraint('fk_flt_tn', type_=FK)
with op.batch_alter_table('aim_filter_entries') as batch_op:
batch_op.drop_constraint('fk_filter', type_=FK)
with op.batch_alter_table('aim_contracts') as batch_op:
batch_op.drop_constraint('fk_brc_tn', type_=FK)
with op.batch_alter_table('aim_contract_subjects') as batch_op:
batch_op.drop_constraint('fk_contract', type_=FK)
with op.batch_alter_table('aim_endpoints') as batch_op:
batch_op.drop_constraint('fk_epg', type_=FK)
with op.batch_alter_table('aim_l3outsides') as batch_op:
batch_op.drop_constraint('fk_l3o_tn', type_=FK)
with op.batch_alter_table('aim_external_networks') as batch_op:
batch_op.drop_constraint('fk_l3out', type_=FK)
with op.batch_alter_table('aim_external_subnets') as batch_op:
batch_op.drop_constraint('fk_ext_net', type_=FK)
with op.batch_alter_table('aim_vmm_controllers') as batch_op:
batch_op.drop_constraint('fk_vmm_controller_vmm_domain', type_=FK)
with op.batch_alter_table('aim_vmm_inj_deployments') as batch_op:
batch_op.drop_constraint('fk_inj_depl_inj_ns', type_=FK)
with op.batch_alter_table('aim_vmm_inj_replica_sets') as batch_op:
batch_op.drop_constraint('fk_inj_repl_set_inj_ns', type_=FK)
with op.batch_alter_table('aim_vmm_inj_services') as batch_op:
batch_op.drop_constraint('fk_inj_service_inj_ns', type_=FK)
with op.batch_alter_table('aim_vmm_inj_cont_groups') as batch_op:
batch_op.drop_constraint('fk_inj_group_inj_ns', type_=FK)
with op.batch_alter_table('aim_device_clusters') as batch_op:
batch_op.drop_constraint('fk_ldc_tn', type_=FK)
with op.batch_alter_table('aim_device_cluster_ifs') as batch_op:
batch_op.drop_constraint('fk_dci_dc', type_=FK)
with op.batch_alter_table('aim_concrete_devices') as batch_op:
batch_op.drop_constraint('fk_conc_dev_dc', type_=FK)
with op.batch_alter_table('aim_concrete_device_ifs') as batch_op:
batch_op.drop_constraint('fk_conc_dev_if_conc_dev', type_=FK)
with op.batch_alter_table('aim_service_graph_connections') as batch_op:
batch_op.drop_constraint('fk_sgc_sg', type_=FK)
with op.batch_alter_table('aim_service_graph_nodes') as batch_op:
batch_op.drop_constraint('fk_sgn_sg', type_=FK)
with op.batch_alter_table('aim_service_graphs') as batch_op:
batch_op.drop_constraint('fk_svcgr_tn', type_=FK)
with op.batch_alter_table('aim_service_redirect_policies') as batch_op:
batch_op.drop_constraint('fk_srp_tn', type_=FK)
with op.batch_alter_table('aim_device_cluster_contexts') as batch_op:
batch_op.drop_constraint('fk_dcctx_tn', type_=FK)
with op.batch_alter_table('aim_device_cluster_if_contexts') as batch_op:
batch_op.drop_constraint('fk_dc_if_ctx_dcctx', type_=FK)
with op.batch_alter_table('aim_security_group_subjects') as batch_op:
batch_op.drop_constraint('fk_sg_subject', type_=FK)
with op.batch_alter_table('aim_security_group_rules') as batch_op:
batch_op.drop_constraint('fk_sg_rule', type_=FK)
with op.batch_alter_table('aim_security_groups') as batch_op:
batch_op.drop_constraint('fk_sg_tn', type_=FK)
def downgrade():
pass
|
noironetworks/aci-integration-module
|
aim/db/migration/alembic_migrations/versions/baccabeffa81_remove_fks.py
|
Python
|
apache-2.0
| 4,936 |
"""List diff preferences associated with one's account"""
# pylint: disable=invalid-name
import argparse
import logging
from libpycr.exceptions import PyCRError
from libpycr.gerrit.client import Gerrit
from libpycr.meta import GerritAccountBuiltin
from libpycr.pager import Pager
from libpycr.utils.commandline import expect_account_as_positional
from libpycr.utils.output import checkmark
from libpycr.utils.system import fail
from prettytable import PrettyTable
class LsDiffPrefs(GerritAccountBuiltin):
"""Implement the LS-DIFF-PREFS command"""
# Logger for this command
log = logging.getLogger(__name__)
@property
def name(self):
return 'ls-diff-prefs'
@property
def description(self):
return 'list diff preferences'
@staticmethod
def parse_command_line(arguments):
"""Parse the LS-DIFF-PREFS command command-line arguments
Returns the account id that is provided on the command line. If no
account is provided, returns None.
:param arguments: a list of command-line arguments to parse
:type arguments: list[str]
:rtype: str
"""
parser = argparse.ArgumentParser(
description='List account diff preferences')
expect_account_as_positional(parser)
cmdline = parser.parse_args(arguments)
# fetch changes details
return cmdline.account
def run(self, arguments, *args, **kwargs):
account_id = self.parse_command_line(arguments)
try:
account = Gerrit.get_account(account_id or 'self')
prefs = Gerrit.get_diff_prefs(account_id or 'self')
except PyCRError as why:
fail('cannot list account diff preferences', why)
table = PrettyTable(['Preference', 'Value'])
table.align['Preference'] = 'l'
table.align['Value'] = 'c'
table.add_row(['Context', prefs.context])
table.add_row(['Expand all comments',
checkmark(prefs.expand_all_comments)])
table.add_row(['Ignore whitespace', prefs.ignore_whitespace])
table.add_row(['Intraline difference',
checkmark(prefs.intraline_difference)])
table.add_row(['Line length', prefs.line_length])
table.add_row(['Manual review', checkmark(prefs.manual_review)])
table.add_row(['Retain header', checkmark(prefs.retain_header)])
table.add_row(['Show line endings',
checkmark(prefs.show_line_endings)])
table.add_row(['Show tabs', checkmark(prefs.show_tabs)])
table.add_row(['Show whitespace errors',
checkmark(prefs.show_whitespace_errors)])
table.add_row(['Skip deleted', checkmark(prefs.skip_deleted)])
table.add_row(['Skip uncommented', checkmark(prefs.skip_uncommented)])
table.add_row(['Syntax highlighting',
checkmark(prefs.syntax_highlighting)])
table.add_row(['Tab size', prefs.tab_size])
with Pager(command=self.name):
print 'Account: {}'.format(account.username)
print table
|
JcDelay/pycr
|
libpycr/builtin/accounts/ls-diff-prefs.py
|
Python
|
apache-2.0
| 3,136 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparse tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
from tensorflow.python import _pywrap_utils
from tensorflow.python import tf2
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.types import internal
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_eval_using_default_session = ops._eval_using_default_session
_override_helper = ops._override_helper
# pylint: enable=protected-access
@tf_export("sparse.SparseTensor", "SparseTensor")
class SparseTensor(internal.NativeObject, composite_tensor.CompositeTensor):
"""Represents a sparse tensor.
TensorFlow represents a sparse tensor as three separate dense tensors:
`indices`, `values`, and `dense_shape`. In Python, the three tensors are
collected into a `SparseTensor` class for ease of use. If you have separate
`indices`, `values`, and `dense_shape` tensors, wrap them in a `SparseTensor`
object before passing to the ops below.
Concretely, the sparse tensor `SparseTensor(indices, values, dense_shape)`
comprises the following components, where `N` and `ndims` are the number
of values and number of dimensions in the `SparseTensor`, respectively:
* `indices`: A 2-D int64 tensor of shape `[N, ndims]`, which specifies the
indices of the elements in the sparse tensor that contain nonzero values
(elements are zero-indexed). For example, `indices=[[1,3], [2,4]]` specifies
that the elements with indexes of [1,3] and [2,4] have nonzero values.
* `values`: A 1-D tensor of any type and shape `[N]`, which supplies the
values for each element in `indices`. For example, given `indices=[[1,3],
[2,4]]`, the parameter `values=[18, 3.6]` specifies that element [1,3] of
the sparse tensor has a value of 18, and element [2,4] of the tensor has a
value of 3.6.
* `dense_shape`: A 1-D int64 tensor of shape `[ndims]`, which specifies the
dense_shape of the sparse tensor. Takes a list indicating the number of
elements in each dimension. For example, `dense_shape=[3,6]` specifies a
two-dimensional 3x6 tensor, `dense_shape=[2,3,4]` specifies a
three-dimensional 2x3x4 tensor, and `dense_shape=[9]` specifies a
one-dimensional tensor with 9 elements.
The corresponding dense tensor satisfies:
```python
dense.shape = dense_shape
dense[tuple(indices[i])] = values[i]
```
By convention, `indices` should be sorted in row-major order (or equivalently
lexicographic order on the tuples `indices[i]`). This is not enforced when
`SparseTensor` objects are constructed, but most ops assume correct ordering.
If the ordering of sparse tensor `st` is wrong, a fixed version can be
obtained by calling `tf.sparse.reorder(st)`.
Example: The sparse tensor
```python
SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
```
represents the dense tensor
```python
[[1, 0, 0, 0]
[0, 0, 2, 0]
[0, 0, 0, 0]]
```
"""
@classmethod
def from_value(cls, sparse_tensor_value):
if not is_sparse(sparse_tensor_value):
raise TypeError("Neither a SparseTensor nor SparseTensorValue: %s." %
sparse_tensor_value)
return SparseTensor(
indices=sparse_tensor_value.indices,
values=sparse_tensor_value.values,
dense_shape=sparse_tensor_value.dense_shape)
def __init__(self, indices, values, dense_shape):
"""Creates a `SparseTensor`.
Args:
indices: A 2-D int64 tensor of shape `[N, ndims]`.
values: A 1-D tensor of any type and shape `[N]`.
dense_shape: A 1-D int64 tensor of shape `[ndims]`.
Raises:
ValueError: When building an eager SparseTensor if `dense_shape` is
unknown or contains unknown elements (None or -1).
"""
with ops.name_scope(None, "SparseTensor", [indices, values, dense_shape]):
indices = ops.convert_to_tensor(
indices, name="indices", dtype=dtypes.int64)
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.convert_to_tensor(values, name="values")
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)
self._indices = indices
self._values = values
self._dense_shape = dense_shape
self._dense_shape_default = dense_shape_default
indices_shape = indices.shape.with_rank(2)
values_shape = values.shape.with_rank(1)
dense_shape_shape = dense_shape.shape.with_rank(1)
# Assert number of rows in indices match the number of elements in values.
indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0])
# Assert number of columns in indices matches the number of elements in
# dense_shape.
indices_shape.dims[1].assert_is_compatible_with(dense_shape_shape.dims[0])
def get_shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return self._dense_shape_default
@property
def indices(self):
"""The indices of non-zero values in the represented dense tensor.
Returns:
A 2-D Tensor of int64 with dense_shape `[N, ndims]`, where `N` is the
number of non-zero values in the tensor, and `ndims` is the rank.
"""
return self._indices
@property
def values(self):
"""The non-zero values in the represented dense tensor.
Returns:
A 1-D Tensor of any data type.
"""
return self._values
def with_values(self, new_values):
"""Returns a copy of `self` with `values` replaced by `new_values`.
This method produces a new `SparseTensor` that has the same nonzero
`indices` and same `dense_shape`, but updated values.
Args:
new_values: The values of the new `SparseTensor`. Needs to have the same
shape as the current `.values` `Tensor`. May have a different type than
the current `values`.
Returns:
A `SparseTensor` with identical indices and shape but updated values.
Example usage:
>>> st = tf.sparse.from_dense([[1, 0, 2, 0], [3, 0, 0, 4]])
>>> tf.sparse.to_dense(st.with_values([10, 20, 30, 40])) # 4 nonzero values
<tf.Tensor: shape=(2, 4), dtype=int32, numpy=
array([[10, 0, 20, 0],
[30, 0, 0, 40]], dtype=int32)>
"""
return SparseTensor(self._indices, new_values, self._dense_shape)
@property
def op(self):
"""The `Operation` that produces `values` as an output."""
return self._values.op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._values.dtype
@property
def dense_shape(self):
"""A 1-D Tensor of int64 representing the shape of the dense tensor."""
return self._dense_shape
@property
def shape(self):
"""Get the `TensorShape` representing the shape of the dense tensor.
Returns:
A `TensorShape` object.
"""
return self._dense_shape_default
@property
def graph(self):
"""The `Graph` that contains the index, value, and dense_shape tensors."""
return self._indices.graph
def __str__(self):
return "SparseTensor(indices=%s, values=%s, dense_shape=%s)" % (
self._indices, self._values, self._dense_shape)
def eval(self, feed_dict=None, session=None):
"""Evaluates this sparse tensor in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `SparseTensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
Returns:
A `SparseTensorValue` object.
"""
indices, values, dense_shape = _eval_using_default_session(
[self.indices, self.values, self.dense_shape], feed_dict, self.graph,
session)
return SparseTensorValue(indices, values, dense_shape)
@staticmethod
def _override_operator(operator, func):
_override_helper(SparseTensor, operator, func)
@property
def _type_spec(self):
return SparseTensorSpec(self.shape, self.dtype)
def _shape_invariant_to_type_spec(self, shape):
# From the tf.while_loop docs: "If a loop variable is a SparseTensor, the
# shape invariant must be TensorShape([r]) where r is the rank of the dense
# tensor represented by the sparse tensor. It means the shapes of the three
# tensors of the SparseTensor are ([None], [None, r], [r]). NOTE: The shape
# invariant here is the shape of the SparseTensor.dense_shape property. It
# must be the shape of a vector.
if shape.ndims is not None and shape.ndims != 1:
raise ValueError("Expected a shape with 1 dimension")
rank = tensor_shape.dimension_value(shape[0])
return SparseTensorSpec(tensor_shape.unknown_shape(rank), self.dtype)
def consumers(self):
return self._consumers()
SparseTensorValue = collections.namedtuple("SparseTensorValue",
["indices", "values", "dense_shape"])
tf_export(v1=["SparseTensorValue"])(SparseTensorValue)
_pywrap_utils.RegisterType("SparseTensorValue", SparseTensorValue)
@tf_export("SparseTensorSpec")
@type_spec.register("tf.SparseTensorSpec")
class SparseTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.sparse.SparseTensor`."""
__slots__ = ["_shape", "_dtype"]
value_type = property(lambda self: SparseTensor)
def __init__(self, shape=None, dtype=dtypes.float32):
"""Constructs a type specification for a `tf.sparse.SparseTensor`.
Args:
shape: The dense shape of the `SparseTensor`, or `None` to allow any dense
shape.
dtype: `tf.DType` of values in the `SparseTensor`.
"""
self._shape = tensor_shape.as_shape(shape)
self._dtype = dtypes.as_dtype(dtype)
def _serialize(self):
return (self._shape, self._dtype)
@property
def dtype(self):
"""The `tf.dtypes.DType` specified by this type for the SparseTensor."""
return self._dtype
@property
def shape(self):
"""The `tf.TensorShape` specified by this type for the SparseTensor."""
return self._shape
@property
def _component_specs(self):
rank = self._shape.ndims
num_values = None
return [
tensor_spec.TensorSpec([num_values, rank], dtypes.int64),
tensor_spec.TensorSpec([num_values], self._dtype),
tensor_spec.TensorSpec([rank], dtypes.int64)]
def _to_components(self, value):
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
return [value.indices, value.values, value.dense_shape]
def _from_components(self, tensor_list):
if (all(isinstance(t, np.ndarray) for t in tensor_list) and
not tf2.enabled()):
return SparseTensorValue(*tensor_list)
else:
return SparseTensor(*tensor_list)
# The SparseTensorSpec tensor_list encoding uses (de)serialize_sparse ops
# to (un)box the component tensors in a way that allows for batching &
# unbatching.
@property
def _flat_tensor_specs(self):
# NOTE(mrry): The default flat shape of a boxed `SparseTensor` is `(3,)`,
# but a `SparseTensorSpec` can also represent a batch of boxed
# `SparseTensor` objects with shape `(..., 3)` (and batches of batches,
# etc.), so the flat shape must be unknown.
return [tensor_spec.TensorSpec(None, dtypes.variant)]
def _to_tensor_list(self, value):
value = SparseTensor.from_value(value)
return [gen_sparse_ops.serialize_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _to_batched_tensor_list(self, value):
dense_shape = tensor_util.constant_value_as_shape(value.dense_shape)
if self._shape.merge_with(dense_shape).ndims == 0:
raise ValueError(
"Unbatching a sparse tensor is only supported for rank >= 1")
return [gen_sparse_ops.serialize_many_sparse(
value.indices, value.values, value.dense_shape,
out_type=dtypes.variant)]
def _from_compatible_tensor_list(self, tensor_list):
tensor_list = gen_sparse_ops.deserialize_sparse(tensor_list[0], self._dtype)
indices, values, dense_shape = tensor_list
rank = self._shape.ndims
indices.set_shape([None, rank])
# We restore the dense_shape from the SparseTypeSpec. This is necessary
# for shape inference when using placeholder SparseTensors in function
# tracing.
if self._shape.is_fully_defined():
dense_shape = ops.convert_to_tensor(
self._shape, dtype=dtypes.int64, name="shape")
elif (self._shape.rank is not None and
any(dim.value is not None for dim in self._shape.dims)):
# array_ops imports sparse_tensor.py. Local import to avoid import cycle.
from tensorflow.python.ops import array_ops # pylint: disable=g-import-not-at-top
pieces = array_ops.unstack(dense_shape, num=self._shape.rank)
for i, dim in enumerate(self._shape.dims):
if dim.value is not None:
pieces[i] = constant_op.constant(dim.value, dense_shape.dtype)
dense_shape = array_ops.stack(pieces)
else:
dense_shape.set_shape([rank])
return SparseTensor(indices, values, dense_shape)
def _batch(self, batch_size):
return SparseTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype)
def _unbatch(self):
if self._shape.ndims == 0:
raise ValueError("Unbatching a tensor is only supported for rank >= 1")
return SparseTensorSpec(self._shape[1:], self._dtype)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return SparseTensor
@classmethod
def from_value(cls, value):
if isinstance(value, SparseTensor):
return cls(value.shape, value.dtype)
if isinstance(value, SparseTensorValue):
if isinstance(value.values, np.ndarray):
return cls(value.dense_shape, value.values.dtype)
else:
return cls.from_value(SparseTensor.from_value(value))
else:
raise TypeError("Expected SparseTensor or SparseTensorValue")
# TODO(b/133606651) Delete the SparseTensor registration when CompositeTensor
# is updated to define a _type_spec field (since registration will be
# automatic). Do *not* delete the SparseTensorValue registration.
type_spec.register_type_spec_from_value_converter(
SparseTensor, SparseTensorSpec.from_value)
type_spec.register_type_spec_from_value_converter(
SparseTensorValue, SparseTensorSpec.from_value)
@tf_export(v1=["convert_to_tensor_or_sparse_tensor"])
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
"""Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, SparseTensorValue):
value = SparseTensor.from_value(value)
if isinstance(value, SparseTensor):
if dtype and not dtype.is_compatible_with(value.dtype):
raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
(dtype.name, value.dtype.name))
return value
return ops.convert_to_tensor(value, dtype=dtype, name=name)
def is_sparse(x):
"""Check whether `x` is sparse.
Check whether an object is a `tf.sparse.SparseTensor` or
`tf.compat.v1.SparseTensorValue`.
Args:
x: A python object to check.
Returns:
`True` iff `x` is a `tf.sparse.SparseTensor` or
`tf.compat.v1.SparseTensorValue`.
"""
return isinstance(x, (SparseTensor, SparseTensorValue))
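# Hedged usage sketch (added for illustration; not part of the original module).
# Assumes a TF 2.x runtime where SparseTensorSpec is exported as
# tf.SparseTensorSpec and from_dense lives under tf.sparse; kept as comments so
# the module body is unchanged.
#
#   import tensorflow as tf
#
#   st = tf.sparse.from_dense([[1, 0], [0, 2]])
#   spec = SparseTensorSpec.from_value(st)   # -> SparseTensorSpec(TensorShape([2, 2]), tf.int32)
#   assert spec.shape == st.shape and spec.dtype == st.dtype
#
#   # is_sparse() accepts both SparseTensor and the legacy SparseTensorValue.
#   assert is_sparse(st)
#   assert not is_sparse(tf.constant([1, 2]))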
|
freedomtan/tensorflow
|
tensorflow/python/framework/sparse_tensor.py
|
Python
|
apache-2.0
| 17,827 |
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 06/09/2015'
import numpy as np
class Label:
def __init__(self, path):
file_path = "{}labels.csv".format(path)
try:
            data = np.genfromtxt(file_path, delimiter=',', skip_header=1,
                                 names=['timestamp', 'label'],
                                 # np.int64 instead of the Python 2-only `long`
                                 dtype=[("timestamp", np.int64), ('label', int)])
self.has_label = True
except IOError:
self.has_label = False
return
self.timestamp = data['timestamp']
label = data['label']
self.label = []
for i in range(0, len(label)):
self.label.append(chr(int(label[i])))
self.diff = np.diff(self.timestamp)
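# Hedged usage sketch (added for illustration): Label expects "<path>labels.csv"
# with a header row and "timestamp,label" lines, where each label is an integer
# character code. The directory below is hypothetical.
#
#   labels = Label("data/session_01/")    # reads data/session_01/labels.csv
#   if labels.has_label:
#       print(labels.label[:5])           # decoded characters
#       print(labels.diff[:5])            # deltas between consecutive timestamps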
|
tonybeltramelli/Deep-Spying
|
server/analytics/modules/label/Label.py
|
Python
|
apache-2.0
| 725 |
def parse_event(raw_event, preserve_backslash=False, preserve_dot=False):
in_string = False
words = []
d = {}
key = None
curr = []
for c in raw_event:
if c == '\\' and not preserve_backslash:
continue
elif c == '"':
in_string = not in_string
elif c == ' ':
if in_string:
curr.append(c)
else:
if key:
val = ''.join(curr)
d[key] = decodeCounters(val) if key == 'COUNTERS' else val
key = None
else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
elif c == '=':
key = ''.join(curr)
curr = []
else:
curr.append(c)
if in_string:
curr.append(c)
else:
if key:
d[key] = ''.join(curr)
key = None
else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
return words,d
def decodeCounters(counters):
raw_counter_families = counters[1:-1].split('}{')
counter_families = {}
for raw_family in raw_counter_families:
splitted = raw_family.split('[')
name,desc = decodeCounterKey( splitted[0] )
raw_counters = [s[:-1] if s[-1] == ']' else s for s in splitted[1:]]
counters = {}
for raw_counter in raw_counters:
cname,fdesc,val = decodeCounterKey(raw_counter)
#counters[cname] = Counter(cname,fdesc,val)
counters[cname] = (fdesc,val)
#counter_families[name] = CounterFamily(name,desc,counters)
counter_families[name] = (name,desc,counters)
return counter_families
def decodeCounterKey(s):
return s[1:-1].split(')(')
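# Hedged usage sketch (added for illustration, not part of the original module):
# parse_event() splits a raw Hadoop job-history line into bare words and
# KEY="value" attributes; a COUNTERS value is further decoded by decodeCounters().
# The sample line below is synthetic.
if __name__ == "__main__":
    sample = 'Job JOBID="job_201301010000_0001" JOBNAME="word count" .'
    words, attrs = parse_event(sample)
    print(words)   # expected: ['Job']
    print(attrs)   # expected: {'JOBID': 'job_201301010000_0001', 'JOBNAME': 'word count'}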
|
melrief/Hadoop-Log-Tools
|
hadoop/log/convert/libjobevent.py
|
Python
|
apache-2.0
| 1,665 |
# Copyright (c) 2013 Yogesh Panchal, [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import logging
import requests
from cliff.command import Command
from .utils import read_creds
class Wikiget(Command):
"""
* Get wiki page created for repository
"""
log = logging.getLogger(__name__ + '.Wikiget')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
def get_parser(self, prog_name):
parser = super(Wikiget, self).get_parser(prog_name)
parser.add_argument(
'--account',
'-a',
metavar='<account name>',
required=True,
help='Your account name')
parser.add_argument(
'--reponame',
'-r',
metavar='<repo name>',
required=True,
help='The repository name')
parser.add_argument(
'--page',
'-p',
metavar='<page name>',
required=True,
help='The page title')
return parser
def take_action(self, parsed_args):
self.log.debug('take_action({a})'.format(a=parsed_args))
url = ("https://bitbucket.org/api/1.0/"
"repositories/{a.account}/{a.reponame}/"
"wiki/{a.page}/").format(a=parsed_args)
user, passwd = read_creds()
r = requests.get(url, auth=(user, passwd))
if r.status_code == 200:
data = json.loads(r.text)
msg = """
Markup: {d[markup]}
Revision: {d[rev]}
Page Content: {d[data]}
"""
print(msg.format(d=data))
else:
print("\n Error: '404' No Wiki Pages Found"
" 'or' Invalid argument supplied.\n")
sys.exit(1)
class Wikipost(Command):
"""
    * Post a new wiki page for a repository
"""
log = logging.getLogger(__name__ + '.Wikipost')
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
def get_parser(self, prog_name):
parser = super(Wikipost, self).get_parser(prog_name)
parser.add_argument(
'--account',
'-a',
metavar='<account name>',
required=True,
help='Your account name')
parser.add_argument(
'--reponame',
'-r',
metavar='<repo name>',
required=True,
help='The repository name')
parser.add_argument(
'--page',
'-p',
metavar='<page name>',
required=True,
help='The page title')
parser.add_argument(
'--content',
'-c',
metavar='<page content>',
required=True,
help='The page content')
return parser
def take_action(self, parsed_args):
        self.log.debug('take_action({a})'.format(a=parsed_args))
args = {}
args['content'] = parsed_args.content
url = ("https://bitbucket.org/api/1.0/"
"repositories/{a.account}/{a.reponame}/"
"wiki/{a.page}/").format(a=parsed_args)
user, passwd = read_creds()
r = requests.post(url, data=args, auth=(user, passwd))
print(r.text)
if r.status_code == 200:
print("\n Wiki Page Created Successfully.\n")
else:
msg = ("\n Error: '{r.status_code}' "
"Something Went Wrong -- Bitbucket.\n")
print(msg.format(r=r))
sys.exit(1)
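# Hedged usage sketch (illustration only): these are cliff Command classes, so
# they are normally reached through the package's console-script entry point
# rather than instantiated directly. Assuming the entry point registers them as
# "wikiget"/"wikipost" subcommands (names not shown in this file), a session
# might look like:
#
#   $ bitbucket wikiget -a myaccount -r myrepo -p Home
#   $ bitbucket wikipost -a myaccount -r myrepo -p Home -c "Initial content"
#
# Programmatic use would build the parser with get_parser() and pass the parsed
# namespace to take_action().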
|
yspanchal/bitbucketcli
|
bitbucket/wiki.py
|
Python
|
apache-2.0
| 4,056 |
import argparse
import requests
import logging
import pip._internal.index  # import the submodule explicitly; relies on pip's pre-20.x internal API
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Get the nth version of a given package')
parser.add_argument('--package', type=str, required=True, help='The PyPI you want to inspect')
parser.add_argument('--nth_last_version', type=int, default=1, help='The nth last package will be retrieved')
parser.add_argument('--prerelease', help='Get PreRelease Package Version', action='store_true')
parser.add_argument('--debug', help='Print debug information', action='store_true')
args = parser.parse_args()
logger = logging.getLogger("PyPI_CLI")
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if args.debug:
logger.setLevel(logging.DEBUG)
logger.debug("Package: %s" % args.package)
logger.debug("nth_last_version: %s" % args.nth_last_version)
logger.debug("prerelease: %s" % args.prerelease)
logger.debug("debug: %s" % args.debug)
finder = pip._internal.index.PackageFinder([], ['https://pypi.python.org/simple'], session=requests.Session())
results = finder.find_all_candidates(args.package)
tmp_versions = [str(p.version) for p in results]
logger.debug("%s" % tmp_versions)
versions = list()
for el in tmp_versions:
if el not in versions:
versions.append(el)
pos = -1
nth_version = 1
while True:
fetched_version = versions[pos]
logger.debug("Version: %s" % fetched_version)
if nth_version == args.nth_last_version:
if args.prerelease or not ("rc" in fetched_version or "a" in fetched_version or "b" in fetched_version):
break
else:
pos -= 1
continue
pos -= 1
nth_version += 1
print(fetched_version)
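# Hedged usage sketch (illustration only): the script is meant to be run as a CLI,
# e.g.
#
#   $ python pypi_list.py --package tensorflow --nth_last_version 2 --debug
#
# which prints the 2nd most recent non-prerelease version listed on
# https://pypi.python.org/simple (pass --prerelease to also accept rc/a/b builds).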
|
zsdonghao/tensorlayer
|
docker/pypi_list.py
|
Python
|
apache-2.0
| 1,987 |
from django.shortcuts import render, render_to_response
from django.template import RequestContext  # RequestContext lives in django.template, not django.shortcuts
from .forms import SignUpForm
# Create your views here.
def home(request):
form = SignUpForm(request.POST or None)
if form.is_valid():
save_it = form.save(commit=False)
save_it.save()
return render_to_response("signup.html", locals(), context_instance=RequestContext(request))
|
sonnykr/blog
|
SignUps/views.py
|
Python
|
apache-2.0
| 389 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
"""
This script copies all of the .symcache files referenced by the specified
ETW trace to a specified directory so that they can easily be shared with
other people. The package export option in WPA (File-> Export Package) makes
this functionality mostly obsolete (Export Package puts the .symcache files in
a .wpapk file along with the trace and the current profile) but this script is
retained because it does give additional flexibility and serves as an example.
"""
import os
import re
import shutil
import sys
import csv
import subprocess
# This regular expression takes apart the CodeView Record block.
pdb_re = re.compile(r'\[RSDS\] PdbSig: {(.*-.*-.*-.*-.*)}; Age: (.*); Pdb: (.*)')
def ParseRow(row):
"""Take a CSV row record from an xperf -i -a symcache command and parse it.
The cvRecord is broken up into its constituent guid, age, and PDBPath
parts and the integer components are turned into integers (stripping 0x
headers in the process). The "-" characters in the guid are removed.
If the record contains the file header that labels the columns then None
is returned.
"""
TimeDateStamp, ImageSize, OrigFileName, cvRecord = row
if TimeDateStamp == "TimeDateStamp":
# Ignore the column labels
return None
TimeDateStamp = int(TimeDateStamp, 0)
ImageSize = int(ImageSize, 0)
# Assume that this re match will always succeed.
result = pdb_re.match(cvRecord)
guid, age, PDBPath = result.groups()
guid = guid.replace("-", "")
age = int(age) # Note that the age is in decimal here
return TimeDateStamp, ImageSize, OrigFileName, guid, age, PDBPath
def main():
if len(sys.argv) < 3:
print("Syntax: PackETWSymbols ETWFilename.etl destdirname [-verbose]")
print("This script looks for symbols needed to decode the specified trace, and")
print("copies them to the specified directory. This allows moving traces to")
print("other machines for analysis and sharing.")
sys.exit(0)
# Our usage of subprocess seems to require Python 2.7+
if sys.version_info.major == 2 and sys.version_info.minor < 7:
print("Your python version is too old - 2.7 or higher required.")
print("Python version is %s" % sys.version)
sys.exit(0)
ETLName = sys.argv[1]
DestDirName = sys.argv[2]
if not os.path.exists(DestDirName):
os.mkdir(DestDirName)
verbose = False
if len(sys.argv) > 3 and sys.argv[3].lower() == "-verbose":
verbose = True
print("Extracting symbols from ETL file '%s'." % ETLName)
print("Note that you will probably need to update the 'interesting' calculation for your purposes.")
# -tle = tolerate lost events
    # -tti = tolerate time inversions
# -a symcache = show image and symbol identification (see xperf -help processing)
# Options to the symcache option (see xperf -help symcache)
# -quiet = don't issue warnings
# -build = build the symcache, including downloading symbols
# -imageid = show module size/data/name information
# -dbgid = show PDB guid/age/name information
command = "xperf.exe -i \"%s\" -tle -tti -a symcache -quiet -imageid -dbgid" % ETLName
# The -symbols option can be added in front of -a if symbol loading from
# symbol servers is desired.
# Typical output lines (including the heading) look like this:
#TimeDateStamp, ImageSize, OrigFileName, CodeView Record
# 0x4da89d03, 0x00bcb000, "client.dll", "[RSDS] PdbSig: {7b2a9028-87cd-448d-8500-1a18cdcf6166}; Age: 753; Pdb: u:\buildbot\dota_staging_win32\build\src\game\client\Release_dota\client.pdb"
print("Executing command '%s'" % command)
lines = subprocess.check_output(command).decode().splitlines()
matchCount = 0
matchExists = 0
interestingModuleCount = 0
symCachePathBase = os.getenv("_NT_SYMCACHE_PATH");
if symCachePathBase == None or len(symCachePathBase) == 0:
# Set SymCache to the C drive if not specified.
symCachePathBase = "c:\\symcache\\"
print("_NT_SYMCACHE_PATH not set. Looking for symcache in %s" % symCachePathBase)
if symCachePathBase[-1] != '\\':
symCachePathBase += '\\'
for row in csv.reader(lines, delimiter = ",", quotechar = '"', skipinitialspace=True):
results = ParseRow(row)
if not results:
continue
TimeDateStamp, ImageSize, OrigFileName, guid, age, PDBPath = results
matchCount += 1
# Find out which PDBs are 'interesting'. There is no obvious heuristic
# for this, so for now all symbols whose PDB path name contains a colon
# are counted, which filters out many Microsoft symbols. The ideal filter
# would return all locally built symbols - all that cannot be found on
# symbol servers - and this gets us partway there.
interesting = PDBPath.count(":") > 0 or PDBPath.count("chrome") > 0
if interesting:
interestingModuleCount += 1
# WPT has three different .symcache file patterns. None are documented but
# all occur in the symcache directory.
symCachePathv1 = "%s-%08x%08xv1.symcache" % (OrigFileName, TimeDateStamp, ImageSize)
symCachePathv2 = "%s-%s%xv2.symcache" % (OrigFileName, guid, age)
pdb_file = os.path.split(PDBPath)[1]
symCachePathv3 = r"%s\%s%s\%s-v3.1.0.symcache" % (pdb_file, guid, age, pdb_file)
symCachePathv1 = os.path.join(symCachePathBase, symCachePathv1)
symCachePathv2 = os.path.join(symCachePathBase, symCachePathv2)
symCachePathv3 = os.path.join(symCachePathBase, symCachePathv3)
foundPath = None
if os.path.isfile(symCachePathv1):
foundPath = symCachePathv1
elif os.path.isfile(symCachePathv2):
foundPath = symCachePathv2
elif os.path.isfile(symCachePathv3):
foundPath = symCachePathv3
if foundPath:
matchExists += 1
print("Copying %s" % foundPath)
dest = foundPath[len(symCachePathBase):]
dest_dir, dest_file = os.path.split(dest)
if dest_dir:
try:
os.makedirs(os.path.join(DestDirName, dest_dir))
except:
pass # Continue on exceptions, directly probably already exists.
shutil.copyfile(foundPath, os.path.join(DestDirName, dest_dir, dest_file))
else:
if verbose:
print("Symbols for '%s' are not in %s or %s" % (OrigFileName, symCachePathv1, symCachePathv2))
else:
#This is normally too verbose
if verbose:
print("Skipping %s" % PDBPath)
if matchCount == interestingModuleCount:
print("%d symbol files found in the trace and %d of those exist in symcache." % (matchCount, matchExists))
else:
print("%d symbol files found in the trace, %d appear to be interesting, and %d of those exist in symcache." % (matchCount, interestingModuleCount, matchExists))
if matchExists > 0:
print("Symbol files found were copied to %s" % DestDirName)
if __name__ == "__main__":
main()
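# Hedged usage sketch (illustration only), matching the Syntax message printed
# above; the trace and destination names are hypothetical:
#
#   > python ETWPackSymbols.py mytrace.etl packed_symbols -verbose
#
# This runs "xperf -i mytrace.etl ... -a symcache ...", keeps the modules judged
# "interesting", and copies any matching .symcache files from
# _NT_SYMCACHE_PATH (default c:\symcache\) into the packed_symbols directory.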
|
google/UIforETW
|
bin/ETWPackSymbols.py
|
Python
|
apache-2.0
| 7,629 |
from app.gcloud.utils import refresh_credentials, get_gapi_authorized_http
from datetime import datetime, timedelta
import oauth2client
import apiclient
INSTANCE_METRICS = [
'compute.googleapis.com/instance/cpu/utilization'
]
INSTANCE_DISK_METRICS = [
'compute.googleapis.com/instance/disk/read_bytes_count',
'compute.googleapis.com/instance/disk/write_bytes_count',
'compute.googleapis.com/instance/disk/read_ops_count',
'compute.googleapis.com/instance/disk/write_ops_count'
]
INSTANCE_NETWORK_METRICS = [
'compute.googleapis.com/instance/network/received_bytes_count',
'compute.googleapis.com/instance/network/sent_bytes_count'
]
def get_instance_metrics(identity, project, since=None):
http = get_gapi_authorized_http(identity)
google_monitoring_service = apiclient.discovery.build('cloudmonitoring', 'v2beta2', http=http)
if not since:
since = datetime(1970, 1, 1)
since_string = since.isoformat("T") + '+00:00'
for metric in INSTANCE_METRICS:
now_string = datetime.utcnow().isoformat("T") + '+00:00'
for metric in get_instance_metric_type(identity, project, metric, since_string, now_string):
yield metric
for metric in INSTANCE_DISK_METRICS:
now_string = datetime.utcnow().isoformat("T") + '+00:00'
for metric in get_instance_disk_metric_type(identity, project, metric, since_string, now_string):
yield metric
for metric in INSTANCE_NETWORK_METRICS:
now_string = datetime.utcnow().isoformat("T") + '+00:00'
for metric in get_instance_network_metric_type(identity, project, metric, since_string, now_string):
yield metric
def get_instance_metric_type(identity, project, metric_name, start, stop):
next_page_token = None
while True:
res = retrieve_timeseries(identity, project, metric_name, start, stop, next_page_token)
if 'timeseries' in res:
for t in res['timeseries']:
resource = t['timeseriesDesc']['labels']['compute.googleapis.com/instance_name'] if 'compute.googleapis.com/instance_name' in t['timeseriesDesc']['labels'] else t['timeseriesDesc']['labels']['compute.googleapis.com/resource_id']
base = dict(resource=resource,
metric='GCLOUD/COMPUTE:{}'.format(metric_name))
for point in t['points']:
if 'doubleValue' in point:
base.update(value=point['doubleValue'],
time=point['start'])
yield base
if not 'nextPageToken' in res:
break
else:
next_page_token = res['nextPageToken']
def get_instance_disk_metric_type(identity, project, metric_name, start, stop):
next_page_token = None
while True:
res = retrieve_timeseries(identity, project, metric_name, start, stop, next_page_token)
if 'timeseries' in res:
for t in res['timeseries']:
resource = t['timeseriesDesc']['labels']['compute.googleapis.com/device_name'] if 'compute.googleapis.com/device_name' in t['timeseriesDesc']['labels'] else t['timeseriesDesc']['labels']['compute.googleapis.com/resource_id']
base = dict(resource=resource,
metric='GCLOUD/COMPUTE:{}'.format(metric_name))
for point in t['points']:
if 'int64Value' in point:
base.update(value=point['int64Value'],
time=point['start'])
yield base
if not 'nextPageToken' in res:
break
else:
next_page_token = res['nextPageToken']
def get_instance_network_metric_type(identity, project, metric_name, start, stop):
next_page_token = None
while True:
res = retrieve_timeseries(identity, project, metric_name, start, stop, next_page_token)
if 'timeseries' in res:
for t in res['timeseries']:
resource = t['timeseriesDesc']['labels']['compute.googleapis.com/instance_name'] if 'compute.googleapis.com/instance_name' in t['timeseriesDesc']['labels'] else t['timeseriesDesc']['labels']['compute.googleapis.com/resource_id']
base = dict(resource=resource,
metric='GCLOUD/COMPUTE:{}'.format(metric_name))
for point in t['points']:
if 'int64Value' in point:
base.update(value=point['int64Value'],
time=point['start'])
yield base
if not 'nextPageToken' in res:
break
else:
next_page_token = res['nextPageToken']
def retrieve_timeseries(identity, project, metric_name, start, stop, next_page_token=None):
def get_timeseries():
http = get_gapi_authorized_http(identity)
google_monitoring_service = apiclient.discovery.build('cloudmonitoring', 'v2beta2', http=http)
res = google_monitoring_service.timeseries().list(project=project['code'], metric=metric_name, oldest=start, youngest=stop, pageToken=next_page_token).execute()
return res
try:
timeseries = get_timeseries()
except oauth2client.client.HttpAccessTokenRefreshError:
refresh_credentials(identity)
timeseries = get_timeseries()
return timeseries
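# Hedged usage sketch (illustration only): get_instance_metrics() is a generator
# of metric points. `identity` is whatever get_gapi_authorized_http() and
# refresh_credentials() expect, and `project` must carry the GCP project id
# under project['code'] (see retrieve_timeseries above); both values below are
# placeholders.
#
#   since = datetime.utcnow() - timedelta(hours=1)
#   for point in get_instance_metrics(identity, {'code': 'my-gcp-project'}, since=since):
#       print(point['resource'], point['metric'], point['time'], point['value'])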
|
giubil/trackit
|
api/files/api/app/gcloud/compute_metrics.py
|
Python
|
apache-2.0
| 5,327 |
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD
import numpy as np
def VGG_16(weights_path=None):
model = Sequential()
model.add(ZeroPadding2D((1,1),input_shape=(3,224,224)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2,2), strides=(2,2)))
model.add(Flatten())
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1000, activation='softmax'))
if weights_path:
model.load_weights(weights_path)
return model
if __name__ == "__main__":
import cv2
im = cv2.resize(cv2.imread('cat.jpg'), (224, 224)).astype(np.float32)
im[:,:,0] -= 103.939
im[:,:,1] -= 116.779
im[:,:,2] -= 123.68
im = im.transpose((2,0,1))
im = np.expand_dims(im, axis=0)
# Test pretrained model
model = VGG_16('vgg16_weights.h5')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy')
out = model.predict(im)
    print(np.argmax(out))
|
qxcv/joint-regressor
|
keras/vggnet/vgg16_keras.py
|
Python
|
apache-2.0
| 2,604 |
import os
import sys  # used for error reporting when the Java call fails
import numpy as np
import nibabel as nb
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
    _check_available_memory
def levelset_curvature(levelset_image, distance=1.0,
save_data=False, overwrite=False, output_dir=None,
file_name=None):
"""Levelset curvature
Estimates surface curvature of a levelset using a quadric approximation scheme.
Parameters
----------
levelset_image: niimg
Levelset image to be turned into probabilities
distance: float, optional
Distance from the boundary in voxels where to estimate the curvature
save_data: bool, optional
Save output data to file (default is False)
overwrite: bool, optional
Overwrite existing results (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
----------
dict
Dictionary collecting outputs under the following keys
(suffix of output files in brackets)
* mcurv (niimg): Mean curvature (output file suffix _curv-mean)
* gcurv (niimg): Gaussian curvature (output file suffix _curv-gauss)
Notes
----------
Ported from original Java module by Pierre-Louis Bazin
"""
print("\nLevelset Curvature")
# make sure that saving related parameters are correct
if save_data:
output_dir = _output_dir_4saving(output_dir, levelset_image)
mcurv_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=levelset_image,
suffix='curv-mean'))
gcurv_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=levelset_image,
suffix='curv-gauss'))
if overwrite is False \
and os.path.isfile(mcurv_file) \
and os.path.isfile(gcurv_file) :
print("skip computation (use existing results)")
output = {'mcurv': mcurv_file, 'gcurv': gcurv_file}
return output
# load the data
lvl_img = load_volume(levelset_image)
lvl_data = lvl_img.get_data()
hdr = lvl_img.header
aff = lvl_img.affine
resolution = [x.item() for x in hdr.get_zooms()]
dimensions = lvl_data.shape
# algorithm
# start virtual machine, if not already running
try:
mem = _check_available_memory()
nighresjava.initVM(initialheap=mem['init'], maxheap=mem['max'])
except ValueError:
pass
# create algorithm instance
algorithm = nighresjava.LevelsetCurvature()
# set parameters
algorithm.setMaxDistance(distance)
# load images and set dimensions and resolution
input_image = load_volume(levelset_image)
data = input_image.get_data()
affine = input_image.get_affine()
header = input_image.get_header()
resolution = [x.item() for x in header.get_zooms()]
dimensions = input_image.shape
algorithm.setDimensions(dimensions[0], dimensions[1], dimensions[2])
algorithm.setResolutions(resolution[0], resolution[1], resolution[2])
algorithm.setLevelsetImage(nighresjava.JArray('float')(
(data.flatten('F')).astype(float)))
# execute
try:
algorithm.execute()
except:
# if the Java module fails, reraise the error it throws
print("\n The underlying Java code did not execute cleanly: ")
print(sys.exc_info()[0])
raise
return
# Collect output
mcurv_data = np.reshape(np.array(
algorithm.getMeanCurvatureImage(),
dtype=np.float32), dimensions, 'F')
gcurv_data = np.reshape(np.array(
algorithm.getGaussCurvatureImage(),
dtype=np.float32), dimensions, 'F')
hdr['cal_min'] = np.nanmin(mcurv_data)
hdr['cal_max'] = np.nanmax(mcurv_data)
mcurv = nb.Nifti1Image(mcurv_data, aff, hdr)
hdr['cal_min'] = np.nanmin(gcurv_data)
hdr['cal_max'] = np.nanmax(gcurv_data)
gcurv = nb.Nifti1Image(gcurv_data, aff, hdr)
if save_data:
save_volume(mcurv_file, mcurv)
save_volume(gcurv_file, gcurv)
return {'mcurv': mcurv_file, 'gcurv': gcurv_file}
else:
return {'mcurv': mcurv, 'gcurv': gcurv}
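# Hedged usage sketch (illustration only); the input filename is hypothetical:
#
#   curv = levelset_curvature('sub01_levelset.nii.gz', distance=1.5,
#                             save_data=True, output_dir='out/')
#   # With save_data=True, curv['mcurv'] / curv['gcurv'] are file paths to the
#   # mean and Gaussian curvature maps; otherwise they are Nifti1Image objects.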
|
nighres/nighres
|
nighres/surface/levelset_curvature.py
|
Python
|
apache-2.0
| 4,723 |
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .base import Integer
__all__ = ["Int32"]
@type_checked
class Int32:
"""
XDR Source Code::
typedef int int32;
"""
def __init__(self, int32: int) -> None:
self.int32 = int32
def pack(self, packer: Packer) -> None:
Integer(self.int32).pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "Int32":
int32 = Integer.unpack(unpacker)
return cls(int32)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "Int32":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "Int32":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.int32 == other.int32
def __str__(self):
return f"<Int32 [int32={self.int32}]>"
|
StellarCN/py-stellar-base
|
stellar_sdk/xdr/int32.py
|
Python
|
apache-2.0
| 1,398 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.conf import settings
from django.core.urlresolvers import reverse
from django.forms import widgets
from mox import IsA
from horizon import api
from horizon import test
class VolumeViewTests(test.TestCase):
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list')})
def test_create_volume(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': ''}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_create',
'volume_snapshot_list'),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50, 'snapshot_source': snapshot.id}
# first call- with url param
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
# second call- with dropdown
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
snapshot_id=snapshot.id).\
AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
redirect_url = reverse('horizon:project:volumes:index')
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({api: ('tenant_quota_usages',),
api.nova: ('volume_snapshot_get',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage = {'gigabytes': {'available': 250}, 'volumes': {'available': 6}}
snapshot = self.volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.nova.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_gb_used_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20}}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 100GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('tenant_quota_usages', 'volume_snapshot_list')})
def test_create_volume_number_over_alloted_quota(self):
usage = {'gigabytes': {'available': 100, 'used': 20},
'volumes': {'available': 0}}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
api.volume_snapshot_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_snapshots.list())
api.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({api: ('volume_list',
'volume_delete',
'server_list')})
def test_delete_volume(self):
volume = self.volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.volume_delete(IsA(http.HttpRequest), volume.id)
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:index')
res = self.client.post(url, formData, follow=True)
self.assertMessageCount(res, count=0)
@test.create_stubs({api: ('volume_list',
'volume_delete',
'server_list')})
def test_delete_volume_error_existing_snapshot(self):
volume = self.volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
exc = self.exceptions.cinder.__class__(400,
"error: dependent snapshots")
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.volume_delete(IsA(http.HttpRequest), volume.id). \
AndRaise(exc)
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
api.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(self.volumes.list())
api.server_list(IsA(http.HttpRequest)).AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:index')
res = self.client.post(url, formData, follow=True)
self.assertMessageCount(res, error=1)
self.assertEqual(list(res.context['messages'])[0].message,
u'Unable to delete volume "%s". '
u'One or more snapshots depend on it.' %
volume.display_name)
@test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments(self):
volume = self.volumes.first()
servers = self.servers.list()
api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach', args=[volume.id])
res = self.client.get(url)
# Asserting length of 2 accounts for the one instance option,
# and the one 'Choose Instance' option.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
2)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
@test.create_stubs({api: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
PREV = settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point']
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = False
volume = self.volumes.first()
servers = self.servers.list()
api.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn(servers)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach', args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
settings.OPENSTACK_HYPERVISOR_FEATURES['can_set_mount_point'] = PREV
@test.create_stubs({api: ('volume_get',),
api.nova: ('server_get', 'server_list',)})
def test_edit_attachments_attached_volume(self):
server = self.servers.first()
volume = self.volumes.list()[0]
api.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({api.nova: ('volume_get', 'server_get',)})
def test_detail_view(self):
volume = self.volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
api.nova.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res,
"<dd>41023e92-8008-4c8b-8059-7f2293ff3775</dd>",
1,
200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
|
1ukash/horizon
|
horizon/dashboards/project/volumes/tests.py
|
Python
|
apache-2.0
| 14,507 |
from __future__ import absolute_import
import zope.interface.interface
from zope.interface.adapter import AdapterLookup as _AdapterLookup
from zope.interface.adapter import AdapterRegistry as _AdapterRegistry
from zope.interface.registry import Components, ComponentLookupError
__all__ = ('Registry',)
NO_CONTRACTS = 0
USE_CONTRACTS = 1
USE_CONTRACTS_WARN = 2
class AdapterLookup(_AdapterLookup):
def lookup(self, required, provided, name=u'', default=None):
factory = super(AdapterLookup, self).lookup(
required, provided, name=name, default=default)
if factory is None or self._registry.level == NO_CONTRACTS:
return factory
contract = getattr(provided, '__contract__', None)
if contract is not None:
return contract.bind_adapter(factory, self._registry.logger)
return factory
class AdapterRegistry(_AdapterRegistry):
level = NO_CONTRACTS
logger = None
LookupClass = AdapterLookup
def __init__(self, bases=(), logger=None):
self.logger = logger
super(AdapterRegistry, self).__init__(bases=bases)
def enable_contracts(self, level):
self.level = level
class Registry(Components):
""" Registry """
def __init__(self, name='', bases=(),
use_contracts=NO_CONTRACTS, flavor=None, logger=None):
self._use_contracts = use_contracts
self._flavor = flavor
self._logger = logger
super(Registry, self).__init__(name, bases)
def _init_registries(self):
self.adapters = AdapterRegistry(logger=self._logger)
self.utilities = AdapterRegistry(logger=self._logger)
@property
def flavor(self):
return self._flavor
def enable_contracts(self, warn_only=False):
if warn_only:
self._use_contracts = USE_CONTRACTS_WARN
self.adapters.enable_contracts(USE_CONTRACTS_WARN)
else:
self._use_contracts = USE_CONTRACTS
self.adapters.enable_contracts(USE_CONTRACTS)
def _adapter_hook(self, interface, object, name='', default=None):
return self.queryAdapter(object, interface, name, default)
def install(self, use_contracts=False):
zope.interface.interface.adapter_hooks.append(self._adapter_hook)
if use_contracts:
self.enable_contracts()
def uninstall(self):
if self._adapter_hook in zope.interface.interface.adapter_hooks:
zope.interface.interface.adapter_hooks.remove(self._adapter_hook)
def queryAdapter(self, object, interface, name=u'', default=None):
if isinstance(object, (tuple, list)):
adapter = self.adapters.queryMultiAdapter(
object, interface, name, default)
else:
adapter = self.adapters.queryAdapter(
object, interface, name, default)
if self._use_contracts == NO_CONTRACTS:
return adapter
contract = getattr(interface, 'contract', None)
if contract and adapter is not None:
return contract(adapter, logger=self._logger)
return adapter
def getAdapter(self, object, interface, name=u''):
adapter = self.adapters.queryAdapter(object, interface, name)
if adapter is None:
raise ComponentLookupError(object, interface, name)
if self._use_contracts == NO_CONTRACTS:
return adapter
contract = getattr(interface, 'contract', None)
if contract:
return contract(adapter, logger=self._logger)
return adapter
def __enter__(self):
self.install()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.uninstall()
return False
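# Hedged usage sketch (illustration only): the registry can be installed into
# zope.interface's adapter hooks via install()/uninstall() or used as a context
# manager; the name, flavor, object and interface below are placeholders.
#
#   reg = Registry('my-app', flavor='testing', use_contracts=USE_CONTRACTS_WARN)
#   with reg:                                  # install()s the adapter hook
#       adapter = reg.queryAdapter(some_object, ISomeInterface, default=None)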
|
fafhrd91/mdl
|
mdl/registry.py
|
Python
|
apache-2.0
| 3,760 |
def tracer(func): # State via enclosing scope and func attr
def wrapper(*args, **kwargs): # calls is per-function, not global
wrapper.calls += 1
print('call %s to %s' % (wrapper.calls, func.__name__))
return func(*args, **kwargs)
wrapper.calls = 0
return wrapper
@tracer
def spam(a, b, c): # Same as: spam = tracer(spam)
print(a + b + c)
@tracer
def eggs(x, y): # Same as: eggs = tracer(eggs)
print(x ** y)
spam(1, 2, 3) # Really calls wrapper, assigned to spam
spam(a=4, b=5, c=6) # wrapper calls spam
eggs(2, 16) # Really calls wrapper, assigned to eggs
eggs(4, y=4) # wrapper.calls _is_ per-decoration here
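# Expected output of the four calls above (each decorated function gets its own
# wrapper.calls counter, so the counts restart for eggs):
#
#   call 1 to spam
#   6
#   call 2 to spam
#   15
#   call 1 to eggs
#   65536
#   call 2 to eggs
#   256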
|
dreadrel/UWF_2014_spring_COP3990C-2507
|
notebooks/scripts/book_code/code/decorator5.py
|
Python
|
apache-2.0
| 750 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
import google.api_core.operations_v1
from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc
class BigtableInstanceAdminGrpcTransport(object):
"""gRPC transport class providing stubs for
google.bigtable.admin.v2 BigtableInstanceAdmin API.
The transport provides access to the raw gRPC stubs,
which can be used to take advantage of advanced
features of gRPC.
"""
# The scopes needed to make gRPC calls to all of the methods defined
# in this service.
_OAUTH_SCOPES = (
'https://www.googleapis.com/auth/bigtable.admin',
'https://www.googleapis.com/auth/bigtable.admin.cluster',
'https://www.googleapis.com/auth/bigtable.admin.instance',
'https://www.googleapis.com/auth/bigtable.admin.table',
'https://www.googleapis.com/auth/cloud-bigtable.admin',
'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster',
'https://www.googleapis.com/auth/cloud-bigtable.admin.table',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only',
)
def __init__(self,
channel=None,
credentials=None,
address='bigtableadmin.googleapis.com:443'):
"""Instantiate the transport class.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
address (str): The address where the service is hosted.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments are mutually '
'exclusive.', )
# Create the channel.
if channel is None:
channel = self.create_channel(
address=address,
credentials=credentials,
)
# gRPC uses objects called "stubs" that are bound to the
# channel and provide a basic method for each RPC.
self._stubs = {
'bigtable_instance_admin_stub':
bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub(
channel),
}
# Because this API includes a method that returns a
# long-running operation (proto: google.longrunning.Operation),
# instantiate an LRO client.
self._operations_client = google.api_core.operations_v1.OperationsClient(
channel)
@classmethod
def create_channel(cls,
address='bigtableadmin.googleapis.com:443',
credentials=None):
"""Create and return a gRPC channel object.
Args:
address (str): The host for the channel to use.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
Returns:
grpc.Channel: A gRPC channel object.
"""
return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=cls._OAUTH_SCOPES,
)
@property
def create_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Create an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateInstance
@property
def get_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetInstance
@property
def list_instances(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about instances in a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListInstances
@property
def update_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateInstance
@property
def partial_update_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Partially updates an instance within a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs[
'bigtable_instance_admin_stub'].PartialUpdateInstance
@property
def delete_instance(self):
"""Return the gRPC stub for {$apiMethod.name}.
Delete an instance from a project.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteInstance
@property
def create_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates a cluster within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateCluster
@property
def get_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about a cluster.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetCluster
@property
def list_clusters(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about clusters in an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListClusters
@property
def update_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates a cluster within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateCluster
@property
def delete_cluster(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes a cluster from an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteCluster
@property
def create_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Creates an app profile within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].CreateAppProfile
@property
def get_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets information about an app profile.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetAppProfile
@property
def list_app_profiles(self):
"""Return the gRPC stub for {$apiMethod.name}.
Lists information about app profiles in an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].ListAppProfiles
@property
def update_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Updates an app profile within an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].UpdateAppProfile
@property
def delete_app_profile(self):
"""Return the gRPC stub for {$apiMethod.name}.
Deletes an app profile from an instance.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].DeleteAppProfile
@property
def get_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Gets the access control policy for an instance resource. Returns an empty
policy if an instance exists but does not have a policy set.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].GetIamPolicy
@property
def set_iam_policy(self):
"""Return the gRPC stub for {$apiMethod.name}.
Sets the access control policy on an instance resource. Replaces any
existing policy.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].SetIamPolicy
@property
def test_iam_permissions(self):
"""Return the gRPC stub for {$apiMethod.name}.
Returns permissions that the caller has on the specified instance resource.
Returns:
Callable: A callable which accepts the appropriate
deserialized request object and returns a
deserialized response object.
"""
return self._stubs['bigtable_instance_admin_stub'].TestIamPermissions
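# Usage sketch (not part of the generated transport; the channel, request
# message and class name below are assumptions for illustration): each
# property above returns the raw gRPC callable, so a caller builds the
# request message itself and invokes the callable directly, e.g.
#
#   transport = BigtableInstanceAdminGrpcTransport(channel=channel)
#   request = bigtable_instance_admin_pb2.GetClusterRequest(name=cluster_name)
#   response = transport.get_cluster(request)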
|
tseaver/gcloud-python
|
bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py
|
Python
|
apache-2.0
| 12,786 |
import ast
import logging
import time
import unittest
from malcolm.profiler import Profiler
# https://github.com/bdarnell/plop/blob/master/plop/test/collector_test.py
class ProfilerTest(unittest.TestCase):
def filter_stacks(self, results):
# Kind of hacky, but this is the simplest way to keep the tests
# working after the internals of the collector changed to support
# multiple formatters.
stack_counts = ast.literal_eval(results)
counts = {}
for stack, count in stack_counts.items():
filtered_stack = [
frame[2] for frame in stack if frame[0].endswith("test_profiler.py")
]
if filtered_stack:
counts[tuple(filtered_stack)] = count
return counts
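# Illustrative input for filter_stacks (assumed shape, matching the
# ast.literal_eval call above): the profiler output is the repr of a dict
# mapping stack tuples of (filename, lineno, funcname) frames to sample
# counts, e.g.
#   "{(('test_profiler.py', 42, 'a'), ('test_profiler.py', 60, 'test_collector')): 12}"
# filter_stacks keeps only the function names of frames from this test file,
# so the example above would yield {('a', 'test_collector'): 12}.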
def check_counts(self, counts, expected):
failed = False
output = []
for stack, count in expected.items():
# every expected frame should appear in the data, but
# the inverse is not true if the signal catches us between
# calls.
self.assertTrue(stack in counts)
ratio = float(counts[stack]) / float(count)
output.append(
"%s: expected %s, got %s (%s)" % (stack, count, counts[stack], ratio)
)
if not (0.70 <= ratio <= 1.25):
failed = True
if failed:
for line in output:
logging.warning(line)
for key in set(counts.keys()) - set(expected.keys()):
logging.warning("unexpected key: %s: got %s" % (key, counts[key]))
self.fail("collected data did not meet expectations")
def test_collector(self):
start = time.time()
def a(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def b(end):
while time.time() < end:
pass
c(time.time() + 0.1)
def c(end):
while time.time() < end:
pass
profiler = Profiler("/tmp")
profiler.start(interval=0.01)
a(time.time() + 0.1)
b(time.time() + 0.2)
c(time.time() + 0.3)
end = time.time()
profiler.stop("profiler_test.plop")
elapsed = end - start
self.assertTrue(0.8 < elapsed < 0.9, elapsed)
with open("/tmp/profiler_test.plop") as f:
results = f.read()
counts = self.filter_stacks(results)
expected = {
("a", "test_collector"): 10,
("c", "a", "test_collector"): 10,
("b", "test_collector"): 20,
("c", "b", "test_collector"): 10,
("c", "test_collector"): 30,
}
self.check_counts(counts, expected)
|
dls-controls/pymalcolm
|
tests/test_profiler.py
|
Python
|
apache-2.0
| 2,778 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from unittest.mock import MagicMock, Mock
import pytest
from pytest import fixture
from airflow.exceptions import AirflowException
from airflow.models.connection import Connection
from airflow.providers.microsoft.azure.hooks.azure_data_factory import (
AzureDataFactoryHook,
provide_targeted_factory,
)
from airflow.utils import db
DEFAULT_RESOURCE_GROUP = "defaultResourceGroup"
RESOURCE_GROUP = "testResourceGroup"
DEFAULT_FACTORY = "defaultFactory"
FACTORY = "testFactory"
MODEL = object()
NAME = "testName"
ID = "testId"
def setup_module():
connection = Connection(
conn_id="azure_data_factory_test",
conn_type="azure_data_factory",
login="clientId",
password="clientSecret",
extra=json.dumps(
{
"tenantId": "tenantId",
"subscriptionId": "subscriptionId",
"resourceGroup": DEFAULT_RESOURCE_GROUP,
"factory": DEFAULT_FACTORY,
}
),
)
db.merge_conn(connection)
@fixture
def hook():
client = AzureDataFactoryHook(conn_id="azure_data_factory_test")
client._conn = MagicMock(
spec=[
"factories",
"linked_services",
"datasets",
"pipelines",
"pipeline_runs",
"triggers",
"trigger_runs",
]
)
return client
def parametrize(explicit_factory, implicit_factory):
def wrapper(func):
return pytest.mark.parametrize(
("user_args", "sdk_args"),
(explicit_factory, implicit_factory),
ids=("explicit factory", "implicit factory"),
)(func)
return wrapper
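# For reference (restating the helper above, no new behaviour): applying
# @parametrize(explicit_factory=..., implicit_factory=...) to a test is
# equivalent to
#   @pytest.mark.parametrize(("user_args", "sdk_args"),
#                            (explicit_factory, implicit_factory),
#                            ids=("explicit factory", "implicit factory"))
# so each test below runs once with an explicit resource group/factory and
# once relying on the connection defaults.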
def test_provide_targeted_factory():
def echo(_, resource_group_name=None, factory_name=None):
return resource_group_name, factory_name
conn = MagicMock()
hook = MagicMock()
hook.get_connection.return_value = conn
conn.extra_dejson = {}
assert provide_targeted_factory(echo)(hook, RESOURCE_GROUP, FACTORY) == (RESOURCE_GROUP, FACTORY)
conn.extra_dejson = {"resourceGroup": DEFAULT_RESOURCE_GROUP, "factory": DEFAULT_FACTORY}
assert provide_targeted_factory(echo)(hook) == (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)
with pytest.raises(AirflowException):
conn.extra_dejson = {}
provide_targeted_factory(echo)(hook)
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_get_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_factory(*user_args)
hook._conn.factories.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_create_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._factory_exists = Mock(return_value=True)
hook.update_factory(*user_args)
hook._conn.factories.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, MODEL)),
implicit_factory=((MODEL,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, MODEL)),
)
def test_update_factory_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._factory_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Factory .+ does not exist"):
hook.update_factory(*user_args)
@parametrize(
explicit_factory=((RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY)),
implicit_factory=((), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY)),
)
def test_delete_factory(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_factory(*user_args)
hook._conn.factories.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_linked_service(*user_args)
hook._conn.linked_services.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_linked_service(*user_args)
hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._linked_service_exists = Mock(return_value=True)
hook.update_linked_service(*user_args)
hook._conn.linked_services.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_linked_service_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._linked_service_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Linked service .+ does not exist"):
hook.update_linked_service(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_linked_service(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_linked_service(*user_args)
hook._conn.linked_services.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_dataset(*user_args)
hook._conn.datasets.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._dataset_exists = Mock(return_value=True)
hook.update_dataset(*user_args)
hook._conn.datasets.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_dataset_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._dataset_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Dataset .+ does not exist"):
hook.update_dataset(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_dataset(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_dataset(*user_args)
hook._conn.datasets.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline(*user_args)
hook._conn.pipelines.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._pipeline_exists = Mock(return_value=True)
hook.update_pipeline(*user_args)
hook._conn.pipelines.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_pipeline_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._pipeline_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Pipeline .+ does not exist"):
hook.update_pipeline(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_pipeline(*user_args)
hook._conn.pipelines.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_run_pipeline(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.run_pipeline(*user_args)
hook._conn.pipelines.create_run.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_get_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_pipeline_run(*user_args)
hook._conn.pipeline_runs.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, ID)),
implicit_factory=((ID,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, ID)),
)
def test_cancel_pipeline_run(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_pipeline_run(*user_args)
hook._conn.pipeline_runs.cancel.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_get_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.get_trigger(*user_args)
hook._conn.triggers.get.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_create_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.create_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=True)
hook.update_trigger(*user_args)
hook._conn.triggers.create_or_update.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, MODEL, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, MODEL)),
implicit_factory=((NAME, MODEL), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, MODEL)),
)
def test_update_trigger_non_existent(hook: AzureDataFactoryHook, user_args, sdk_args):
hook._trigger_exists = Mock(return_value=False)
with pytest.raises(AirflowException, match=r"Trigger .+ does not exist"):
hook.update_trigger(*user_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_delete_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.delete_trigger(*user_args)
hook._conn.triggers.delete.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_start_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.start_trigger(*user_args)
hook._conn.triggers.begin_start.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME)),
implicit_factory=((NAME,), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME)),
)
def test_stop_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.stop_trigger(*user_args)
hook._conn.triggers.begin_stop.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_rerun_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.rerun_trigger(*user_args)
hook._conn.trigger_runs.rerun.assert_called_with(*sdk_args)
@parametrize(
explicit_factory=((NAME, ID, RESOURCE_GROUP, FACTORY), (RESOURCE_GROUP, FACTORY, NAME, ID)),
implicit_factory=((NAME, ID), (DEFAULT_RESOURCE_GROUP, DEFAULT_FACTORY, NAME, ID)),
)
def test_cancel_trigger(hook: AzureDataFactoryHook, user_args, sdk_args):
hook.cancel_trigger(*user_args)
hook._conn.trigger_runs.cancel.assert_called_with(*sdk_args)
|
dhuang/incubator-airflow
|
tests/providers/microsoft/azure/hooks/test_azure_data_factory.py
|
Python
|
apache-2.0
| 15,858 |
# Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.db.models import dns as models
from neutron.objects import base
from neutron.objects import common_types
@obj_base.VersionedObjectRegistry.register
class FloatingIPDNS(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models.FloatingIPDNS
primary_keys = ['floatingip_id']
fields = {
'floatingip_id': obj_fields.UUIDField(),
'dns_name': common_types.DomainNameField(),
'dns_domain': common_types.DomainNameField(),
'published_dns_name': common_types.DomainNameField(),
'published_dns_domain': common_types.DomainNameField(),
}
|
cloudbase/neutron
|
neutron/objects/floatingip.py
|
Python
|
apache-2.0
| 1,361 |
from ajax_select import make_ajax_form
from ajax_select.admin import AjaxSelectAdmin
from bitfield import BitField
from bitfield.admin import BitFieldListFilter
from bitfield.forms import BitFieldCheckboxSelectMultiple
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.html import format_html
from django_object_actions import DjangoObjectActions
from phonenumber_field.modelfields import PhoneNumberField
from phonenumber_field.widgets import PhoneNumberPrefixWidget
from related_admin import RelatedFieldAdmin
from orchestra.communication.slack import get_slack_user_id
from orchestra.models import Certification
from orchestra.models import CommunicationPreference
from orchestra.models import Iteration
from orchestra.models import PayRate
from orchestra.models import Project
from orchestra.models import SanityCheck
from orchestra.models import StaffBotRequest
from orchestra.models import StaffingRequestInquiry
from orchestra.models import StaffingResponse
from orchestra.models import Step
from orchestra.models import Task
from orchestra.models import TaskAssignment
from orchestra.models import TimeEntry
from orchestra.models import Todo
from orchestra.models import TodoQA
from orchestra.models import TodoListTemplate
from orchestra.models import TodoListTemplateImportRecord
from orchestra.models import Worker
from orchestra.models import WorkerCertification
from orchestra.models import WorkerAvailability
from orchestra.models import Workflow
from orchestra.models import WorkflowVersion
from orchestra.todos.import_export import export_to_spreadsheet
admin.site.site_header = 'Orchestra'
admin.site.site_title = 'Orchestra'
admin.site.index_title = 'Orchestra'
@admin.register(Certification)
class CertificationAdmin(admin.ModelAdmin):
list_display = ('id', 'slug', 'workflow', 'name')
ordering = ('slug',)
search_fields = ('slug', 'description', 'name')
list_filter = ('workflow',)
@admin.register(Iteration)
class IterationAdmin(AjaxSelectAdmin):
form = make_ajax_form(Iteration, {
'assignment': 'task_assignments'
})
list_display = (
'id', 'edit_assignment', 'start_datetime', 'end_datetime',
'status')
search_fields = (
'assignment__task__step__name',
'assignment__task__project__short_description',
'assignment__worker__user__username')
ordering = ('assignment__worker__user__username',)
list_filter = ('status', 'assignment__worker__user__username')
def edit_assignment(self, obj):
return edit_link(obj.assignment)
@admin.register(PayRate)
class PayRateAdmin(AjaxSelectAdmin):
form = make_ajax_form(PayRate, {
'worker': 'workers'
})
list_display = (
'id', 'edit_worker', 'hourly_rate', 'hourly_multiplier', 'start_date',
'end_date')
search_fields = ('worker__user__username',)
ordering = ('worker__user__username',)
list_filter = ('worker',)
def edit_worker(self, obj):
return edit_link(obj.worker)
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
list_display = (
'id', 'short_description', 'workflow_version', 'start_datetime')
search_fields = ('short_description',
'workflow_version__slug',
'workflow_version__workflow__slug',)
list_filter = ('workflow_version',)
@admin.register(SanityCheck)
class SanityCheckAdmin(AjaxSelectAdmin):
form = make_ajax_form(SanityCheck, {
'project': 'projects',
})
list_display = ('id', 'created_at', 'project', 'check_slug', 'handled_at')
ordering = ('-created_at',)
search_fields = (
'project__short_description', 'check_slug')
list_filter = ('project__workflow_version',)
@admin.register(Step)
class StepAdmin(admin.ModelAdmin):
list_display = (
'id', 'slug', 'workflow_version', 'name', 'description', 'is_human')
ordering = ('slug',)
search_fields = ('slug', 'name', 'description',)
list_filter = ('workflow_version', 'is_human')
@admin.register(Task)
class TaskAdmin(AjaxSelectAdmin):
form = make_ajax_form(Task, {
'project': 'projects',
})
list_display = (
'id', 'edit_project', 'step_name', 'workflow_version',
'start_datetime')
ordering = ('-project', 'start_datetime',)
search_fields = ('project__short_description', 'step__name',)
list_filter = ('step__is_human', 'project__workflow_version')
def step_name(self, obj):
return obj.step.name
def workflow_version(self, obj):
return obj.project.workflow_version
def edit_project(self, obj):
return edit_link(obj.project, obj.project.short_description)
@admin.register(TaskAssignment)
class TaskAssignmentAdmin(AjaxSelectAdmin):
form = make_ajax_form(TaskAssignment, {
'worker': 'workers',
'task': 'tasks',
})
list_display = (
'id', 'edit_project', 'edit_task', 'assignment_counter', 'edit_worker',
'workflow_version', 'start_datetime')
ordering = ('-task__project', 'task__start_datetime', 'assignment_counter')
search_fields = (
'task__project__short_description', 'task__step__name',
'worker__user__username')
list_filter = ('task__step__is_human', 'task__project__workflow_version')
def workflow_version(self, obj):
return obj.task.project.workflow_version
def edit_task(self, obj):
return edit_link(obj.task, obj.task.step.name)
def edit_project(self, obj):
return edit_link(obj.task.project, obj.task.project.short_description)
def edit_worker(self, obj):
return edit_link(obj.worker)
@admin.register(TimeEntry)
class TimeEntryAdmin(AjaxSelectAdmin):
form = make_ajax_form(TimeEntry, {
'worker': 'workers',
'assignment': 'task_assignments',
})
list_display = ('id', 'date', 'worker', 'time_worked', 'assignment')
search_fields = (
'id', 'worker__user__username', 'assignment__task__step__name',
'assignment__task__project__short_description')
list_filter = ('worker',)
@admin.register(Todo)
class TodoAdmin(admin.ModelAdmin):
# TODO(murat): remove `task` with its removal from the model
autocomplete_fields = ('task', 'project', 'step', 'parent_todo')
list_display = ('id', 'created_at', 'task', 'title', 'completed')
ordering = ('-created_at',)
search_fields = (
'project__short_description', 'step__name',
'title')
list_filter = ('project__workflow_version',)
@admin.register(TodoQA)
class TodoQAAdmin(AjaxSelectAdmin):
list_display = ('id', 'created_at', 'todo', 'comment', 'approved')
ordering = ('-created_at',)
search_fields = ('todo__title', 'comment',)
@admin.register(TodoListTemplateImportRecord)
class TodoListTemplateImportRecordAdmin(AjaxSelectAdmin):
list_display = ('id', 'created_at', 'todo_list_template', 'importer')
list_filter = ('todo_list_template',)
search_fields = (
'todo_list_template__slug',
'todo_list_template__name',
'todo_list_template__description',
'import_url'
)
ordering = ('-created_at',)
@admin.register(TodoListTemplate)
class TodoListTemplateAdmin(DjangoObjectActions, AjaxSelectAdmin):
change_actions = ('export_spreadsheet', 'import_spreadsheet')
form = make_ajax_form(TodoListTemplate, {
'creator': 'workers',
})
list_display = ('id', 'created_at', 'slug', 'name')
ordering = ('-created_at',)
search_fields = (
'slug', 'name', 'todos',
'description')
list_filter = ('creator__user__username',)
def export_spreadsheet(self, request, todo_list_template):
return HttpResponseRedirect(export_to_spreadsheet(todo_list_template))
export_spreadsheet.attrs = {'target': '_blank'}
export_spreadsheet.short_description = 'Export to spreadsheet'
export_spreadsheet.label = 'Export to spreadsheet'
def import_spreadsheet(self, request, todo_list_template):
return redirect(
'orchestra:todos:import_todo_list_template_from_spreadsheet',
pk=todo_list_template.id)
import_spreadsheet.short_description = 'Import from spreadsheet'
import_spreadsheet.label = 'Import from spreadsheet'
@admin.register(Worker)
class WorkerAdmin(AjaxSelectAdmin):
form = make_ajax_form(Worker, {
'user': 'users'
})
list_display = ('id', 'edit_user', 'email', 'slack_username', 'phone')
ordering = ('user__username',)
readonly_fields = ('slack_user_id',)
search_fields = ('user__username', 'user__email', 'slack_username')
formfield_overrides = {
PhoneNumberField: {'widget': PhoneNumberPrefixWidget(initial='US')},
}
def save_model(self, request, obj, form, change):
instance = form.save(commit=False)
instance.slack_user_id = get_slack_user_id(
form.data.get('slack_username'))
instance.save()
def edit_user(self, obj):
return edit_link(obj.user, obj.user.username)
def email(self, obj):
return obj.user.email
@admin.register(WorkerCertification)
class WorkerCertificationAdmin(AjaxSelectAdmin):
form = make_ajax_form(WorkerCertification, {
'worker': 'workers'
})
list_display = ('id', 'worker', 'certification', 'role', 'task_class')
search_fields = (
'worker__user__username', 'certification__slug', 'certification__name',
'certification__workflow__slug', 'certification__workflow__name',
'certification__workflow__description')
ordering = ('-created_at',)
list_filter = ('task_class', 'role')
@admin.register(WorkerAvailability)
class WorkerAvailabilityAdmin(admin.ModelAdmin):
list_display = ('id', 'worker', 'week',)
search_fields = ('worker__user__username', 'worker__user__email',)
autocomplete_fields = ('worker',)
ordering = ('-week',)
@admin.register(Workflow)
class WorkflowAdmin(admin.ModelAdmin):
list_display = (
'id', 'slug', 'name', 'description')
ordering = ('slug',)
search_fields = ('slug', 'name', 'description',)
@admin.register(WorkflowVersion)
class WorkflowVersionAdmin(admin.ModelAdmin):
list_display = (
'id', 'slug', 'workflow', 'name', 'description')
ordering = ('workflow__slug',)
search_fields = ('workflow__slug', 'slug', 'name', 'description')
list_filter = ('workflow',)
@admin.register(CommunicationPreference)
class CommunicationPreferenceAdmin(AjaxSelectAdmin):
form = make_ajax_form(CommunicationPreference, {
'worker': 'workers'
})
formfield_overrides = {
BitField: {'widget': BitFieldCheckboxSelectMultiple},
}
list_display = (
'id', 'worker', 'methods', 'communication_type'
)
search_fields = ('worker__user__username', 'methods', 'communication_type')
list_filter = ('worker__user__username', ('methods', BitFieldListFilter))
@admin.register(StaffBotRequest)
class StaffBotRequestAdmin(RelatedFieldAdmin, AjaxSelectAdmin):
form = make_ajax_form(StaffBotRequest, {
'task': 'tasks',
})
list_display = (
'id', 'task', 'created_at', 'project_description',
)
search_fields = (
'project_description', 'task__id'
)
@admin.register(StaffingRequestInquiry)
class StaffingRequestInquiryAdmin(RelatedFieldAdmin, AjaxSelectAdmin):
form = make_ajax_form(StaffingRequestInquiry, {
'communication_preference': 'communication_preferences',
})
list_display = (
'id', 'communication_preference__worker', 'communication_method',
'request',
'created_at',
)
search_fields = (
'communication_preference__worker__user__username',
'communication_method', 'request__project_description',
'request__task__id'
)
raw_id_fields = ('request',)
@admin.register(StaffingResponse)
class StaffingResponseAdmin(RelatedFieldAdmin, AjaxSelectAdmin):
form = make_ajax_form(StaffingResponse, {
'request_inquiry': 'staffing_request_inquiries',
})
list_display = (
'id',
'request_inquiry__request__task__id',
'request_inquiry__communication_preference__worker__user',
'created_at',
)
search_fields = (
'request_inquiry__communication_preference__worker__user__username',
'request_inquiry__communication_method',
'request_inquiry__request__project_description',
'request_inquiry__request__task__id'
)
def edit_link(instance, text=None):
if not instance:
return None
if text is None:
text = str(instance)
change_url_name = 'admin:{}_{}_change'.format(
instance._meta.app_label, instance._meta.model_name)
change_url = reverse(change_url_name, args=(instance.id,))
return linkify(change_url, text=text)
def linkify(url, text=None, new_window=False):
if text is None:
text = url
target_string = ''
if new_window:
target_string = '_blank'
return format_html(
'<a href="{}" target="{}">{}</a>', url, target_string, text)
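# Illustrative behaviour of the helpers above (hypothetical URL and text):
#   linkify('/admin/orchestra/task/3/change/', 'My task')
#   -> '<a href="/admin/orchestra/task/3/change/" target="">My task</a>'
# edit_link(instance) resolves the instance's admin change URL via
# reverse('admin:<app_label>_<model_name>_change') and wraps it the same way.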
|
b12io/orchestra
|
orchestra/admin.py
|
Python
|
apache-2.0
| 13,202 |
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
class TextTabStopTextAfterTab_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 Hinten\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo Hinten\nhallo'
class TextTabStopTextBeforeTab_ExpectCorrectResult(_VimTest):
snippets = ('test', 'Vorne $1\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'Vorne hallo\nhallo'
class TextTabStopTextSurroundedTab_ExpectCorrectResult(_VimTest):
snippets = ('test', 'Vorne $1 Hinten\n$1')
keys = 'test' + EX + 'hallo test'
wanted = 'Vorne hallo test Hinten\nhallo test'
class TextTabStopTextBeforeMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\nVorne $1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nVorne hallo'
class TextTabStopAfterMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1 Hinten')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nhallo Hinten'
class TextTabStopSurroundMirror_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\nVorne $1 Hinten')
keys = 'test' + EX + 'hallo welt'
wanted = 'hallo welt\nVorne hallo welt Hinten'
class TextTabStopAllSurrounded_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ObenVorne $1 ObenHinten\nVorne $1 Hinten')
keys = 'test' + EX + 'hallo welt'
wanted = 'ObenVorne hallo welt ObenHinten\nVorne hallo welt Hinten'
class MirrorBeforeTabstopLeave_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 ${1:this is it} $1')
keys = 'test' + EX
wanted = 'this is it this is it this is it'
class MirrorBeforeTabstopOverwrite_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 ${1:this is it} $1')
keys = 'test' + EX + 'a'
wanted = 'a a a'
class TextTabStopSimpleMirrorMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo\nhallo'
class SimpleMirrorMultilineMany_ExpectCorrectResult(_VimTest):
snippets = ('test', ' $1\n$1\na$1b\n$1\ntest $1 mich')
keys = 'test' + EX + 'hallo'
wanted = ' hallo\nhallo\nahallob\nhallo\ntest hallo mich'
class MultilineTabStopSimpleMirrorMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n\n$1\n\n$1')
keys = 'test' + EX + 'hallo Du\nHi'
wanted = 'hallo Du\nHi\n\nhallo Du\nHi\n\nhallo Du\nHi'
class MultilineTabStopSimpleMirrorMultiline1_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1\n$1')
keys = 'test' + EX + 'hallo Du\nHi'
wanted = 'hallo Du\nHi\nhallo Du\nHi\nhallo Du\nHi'
class MultilineTabStopSimpleMirrorDeleteInLine_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1\n$1')
keys = 'test' + EX + 'hallo Du\nHi\b\bAch Blah'
wanted = 'hallo Du\nAch Blah\nhallo Du\nAch Blah\nhallo Du\nAch Blah'
class TextTabStopSimpleMirrorMultilineMirrorInFront_ECR(_VimTest):
snippets = ('test', '$1\n${1:sometext}')
keys = 'test' + EX + 'hallo\nagain'
wanted = 'hallo\nagain\nhallo\nagain'
class SimpleMirrorDelete_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo\b\b'
wanted = 'hal\nhal'
class SimpleMirrorSameLine_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1')
keys = 'test' + EX + 'hallo'
wanted = 'hallo hallo'
class SimpleMirrorSameLine_InText_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1')
keys = 'ups test blah' + ESC + '02f i' + EX + 'hallo'
wanted = 'ups hallo hallo blah'
class SimpleMirrorSameLineBeforeTabDefVal_ECR(_VimTest):
snippets = ('test', '$1 ${1:replace me}')
keys = 'test' + EX + 'hallo foo'
wanted = 'hallo foo hallo foo'
class SimpleMirrorSameLineBeforeTabDefVal_DelB4Typing_ECR(_VimTest):
snippets = ('test', '$1 ${1:replace me}')
keys = 'test' + EX + BS + 'hallo foo'
wanted = 'hallo foo hallo foo'
class SimpleMirrorSameLineMany_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1 $1 $1')
keys = 'test' + EX + 'hallo du'
wanted = 'hallo du hallo du hallo du hallo du'
class SimpleMirrorSameLineManyMultiline_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1 $1 $1 $1')
keys = 'test' + EX + 'hallo du\nwie gehts'
wanted = 'hallo du\nwie gehts hallo du\nwie gehts hallo du\nwie gehts' \
' hallo du\nwie gehts'
class SimpleMirrorDeleteSomeEnterSome_ExpectCorrectResult(_VimTest):
snippets = ('test', '$1\n$1')
keys = 'test' + EX + 'hallo\b\bhups'
wanted = 'halhups\nhalhups'
class SimpleTabstopWithDefaultSimpelType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:defa}\n$1')
keys = 'test' + EX + 'world'
wanted = 'ha world\nworld'
class SimpleTabstopWithDefaultComplexType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:default value} $1\nanother: $1 mirror')
keys = 'test' + EX + 'world'
wanted = 'ha world world\nanother: world mirror'
class SimpleTabstopWithDefaultSimpelKeep_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:defa}\n$1')
keys = 'test' + EX
wanted = 'ha defa\ndefa'
class SimpleTabstopWithDefaultComplexKeep_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:default value} $1\nanother: $1 mirror')
keys = 'test' + EX
wanted = 'ha default value default value\nanother: default value mirror'
class TabstopWithMirrorManyFromAll_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $5 ${1:blub} $4 $0 ${2:$1.h} $1 $3 ${4:More}')
keys = 'test' + EX + 'hi' + JF + 'hu' + JF + 'hub' + JF + 'hulla' + \
JF + 'blah' + JF + 'end'
wanted = 'ha blah hi hulla end hu hi hub hulla'
class TabstopWithMirrorInDefaultNoType_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:blub} ${2:$1.h}')
keys = 'test' + EX
wanted = 'ha blub blub.h'
class TabstopWithMirrorInDefaultNoType1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha ${1:blub} ${2:$1}')
keys = 'test' + EX
wanted = 'ha blub blub'
class TabstopWithMirrorInDefaultTwiceAndExtra_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1.h $1.c}\ntest $1')
keys = 'test' + EX + 'stdin'
wanted = 'ha stdin stdin.h stdin.c\ntest stdin'
class TabstopWithMirrorInDefaultMultipleLeave_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:snip} ${3:$1.h $2}')
keys = 'test' + EX + 'stdin'
wanted = 'ha stdin snip stdin.h snip'
class TabstopWithMirrorInDefaultMultipleOverwrite_ExpectCorrectResult(
_VimTest):
snippets = ('test', 'ha $1 ${2:snip} ${3:$1.h $2}')
keys = 'test' + EX + 'stdin' + JF + 'do snap'
wanted = 'ha stdin do snap stdin.h do snap'
class TabstopWithMirrorInDefaultOverwrite_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1.h}')
keys = 'test' + EX + 'stdin' + JF + 'overwritten'
wanted = 'ha stdin overwritten'
class TabstopWithMirrorInDefaultOverwrite1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1}')
keys = 'test' + EX + 'stdin' + JF + 'overwritten'
wanted = 'ha stdin overwritten'
class TabstopWithMirrorInDefaultNoOverwrite1_ExpectCorrectResult(_VimTest):
snippets = ('test', 'ha $1 ${2:$1}')
keys = 'test' + EX + 'stdin' + JF + JF + 'end'
wanted = 'ha stdin stdinend'
class MirrorRealLifeExample_ExpectCorrectResult(_VimTest):
snippets = (
('for', 'for(size_t ${2:i} = 0; $2 < ${1:count}; ${3:++$2})'
'\n{\n\t${0:/* code */}\n}'),
)
keys = 'for' + EX + '100' + JF + 'avar\b\b\b\ba_variable' + JF + \
'a_variable *= 2' + JF + '// do nothing'
wanted = """for(size_t a_variable = 0; a_variable < 100; a_variable *= 2)
{
\t// do nothing
}"""
class Mirror_TestKill_InsertBefore_NoKill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'wihi' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch hinoauch_end'
class Mirror_TestKill_InsertAfter_NoKill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'eiab' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch noauchab_end'
class Mirror_TestKill_InsertBeginning_Kill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'wahi' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch ahiuch_end'
class Mirror_TestKill_InsertEnd_Kill(_VimTest):
snippets = 'test', '$1 $1_'
keys = 'hallo test' + EX + 'auch' + ESC + \
'ehihi' + ESC + 'bb' + 'ino' + JF + 'end'
wanted = 'hallo noauch auchih_end'
class Mirror_TestKillTabstop_Kill(_VimTest):
snippets = 'test', 'welt${1:welt${2:welt}welt} $2'
keys = 'hallo test' + EX + 'elt'
wanted = 'hallo weltelt '
|
yslin/tools-zodlin
|
ubuntu/vim/.vim/lang/all/ultisnips/test/test_Mirror.py
|
Python
|
apache-2.0
| 8,787 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from requests import exceptions
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common import template_format
from heat.common import urlfetch
from heat.engine import attributes
from heat.engine import environment
from heat.engine import properties
from heat.engine.resources import stack_resource
from heat.engine import template
from heat.rpc import api as rpc_api
LOG = logging.getLogger(__name__)
REMOTE_SCHEMES = ('http', 'https')
LOCAL_SCHEMES = ('file',)
STACK_ID_OUTPUT = 'OS::stack_id'
def generate_class_from_template(name, data, param_defaults):
tmpl = template.Template(template_format.parse(data))
props, attrs = TemplateResource.get_schemas(tmpl, param_defaults)
cls = type(name, (TemplateResource,),
{'properties_schema': props,
'attributes_schema': attrs,
'__doc__': tmpl.t.get(tmpl.DESCRIPTION)})
return cls
class TemplateResource(stack_resource.StackResource):
"""A resource implemented by a nested stack.
This implementation passes resource properties as parameters to the nested
stack. Outputs of the nested stack are exposed as attributes of this
resource.
"""
def __init__(self, name, json_snippet, stack):
self._parsed_nested = None
self.stack = stack
self.validation_exception = None
tri = self._get_resource_info(json_snippet)
self.properties_schema = {}
self.attributes_schema = {}
# run Resource.__init__() so we can call self.nested()
super(TemplateResource, self).__init__(name, json_snippet, stack)
self.resource_info = tri
if self.validation_exception is None:
self._generate_schema()
self.reparse()
def _get_resource_info(self, rsrc_defn):
try:
tri = self.stack.env.get_resource_info(
rsrc_defn.resource_type,
resource_name=rsrc_defn.name,
registry_type=environment.TemplateResourceInfo)
except exception.EntityNotFound:
self.validation_exception = ValueError(_(
'Only Templates with an extension of .yaml or '
'.template are supported'))
else:
self._template_name = tri.template_name
self.resource_type = tri.name
self.resource_path = tri.path
if tri.user_resource:
self.allowed_schemes = REMOTE_SCHEMES
else:
self.allowed_schemes = REMOTE_SCHEMES + LOCAL_SCHEMES
return tri
@staticmethod
def get_template_file(template_name, allowed_schemes):
try:
return urlfetch.get(template_name, allowed_schemes=allowed_schemes)
except (IOError, exceptions.RequestException) as r_exc:
args = {'name': template_name, 'exc': str(r_exc)}
msg = _('Could not fetch remote template '
'"%(name)s": %(exc)s') % args
raise exception.NotFound(msg_fmt=msg)
@staticmethod
def get_schemas(tmpl, param_defaults):
return ((properties.Properties.schema_from_params(
tmpl.param_schemata(param_defaults))),
(attributes.Attributes.schema_from_outputs(
tmpl[tmpl.OUTPUTS])))
def _generate_schema(self):
self._parsed_nested = None
try:
tmpl = template.Template(self.child_template())
except (exception.NotFound, ValueError) as download_error:
self.validation_exception = download_error
tmpl = template.Template(
{"HeatTemplateFormatVersion": "2012-12-12"})
# re-generate the properties and attributes from the template.
self.properties_schema, self.attributes_schema = self.get_schemas(
tmpl, self.stack.env.param_defaults)
self.attributes_schema.update(self.base_attributes_schema)
self.attributes.set_schema(self.attributes_schema)
def child_params(self):
"""Override method of child_params for the resource.
:return: parameter values for our nested stack based on our properties
"""
params = {}
for pname, pval in iter(self.properties.props.items()):
if not pval.implemented():
continue
try:
val = self.properties.get_user_value(pname)
except ValueError:
if self.action == self.INIT:
prop = self.properties.props[pname]
val = prop.get_value(None)
else:
raise
if val is not None:
# take a list and create a CommaDelimitedList
if pval.type() == properties.Schema.LIST:
if len(val) == 0:
params[pname] = ''
elif isinstance(val[0], dict):
flattened = []
for (count, item) in enumerate(val):
for (ik, iv) in iter(item.items()):
mem_str = '.member.%d.%s=%s' % (count, ik, iv)
flattened.append(mem_str)
params[pname] = ','.join(flattened)
else:
# When None is returned from get_attr, creating a
# delimited list with it fails during validation.
# we should sanitize the None values to empty strings.
# FIXME(rabi) this needs a permanent solution
# to sanitize attributes and outputs in the future.
params[pname] = ','.join(
(x if x is not None else '') for x in val)
else:
# for MAP, the JSON param takes either a collection or
# string, so just pass it on and let the param validate
# as appropriate
params[pname] = val
return params
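# Illustrative flattening (assumed property values), matching the branches
# above:
#   a LIST of dicts   [{'port': 80}, {'port': 443}]
#       -> '.member.0.port=80,.member.1.port=443'
#   a LIST of scalars ['a', None, 'c']
#       -> 'a,,c'   (None sanitized to an empty string)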
def child_template(self):
if not self._parsed_nested:
self._parsed_nested = template_format.parse(self.template_data(),
self.template_url)
return self._parsed_nested
def regenerate_info_schema(self, definition):
self._get_resource_info(definition)
self._generate_schema()
@property
def template_url(self):
return self._template_name
def template_data(self):
# we want to have the latest possible template.
# 1. look in files
# 2. try download
# 3. look in the db
reported_excp = None
t_data = self.stack.t.files.get(self.template_url)
stored_t_data = t_data
if t_data is None:
LOG.debug('TemplateResource data file "%s" not found in files.',
self.template_url)
if not t_data and self.template_url.endswith((".yaml", ".template")):
try:
t_data = self.get_template_file(self.template_url,
self.allowed_schemes)
except exception.NotFound as err:
if self.action == self.UPDATE:
raise
reported_excp = err
if t_data is None:
nested_identifier = self.nested_identifier()
if nested_identifier is not None:
nested_t = self.rpc_client().get_template(self.context,
nested_identifier)
t_data = jsonutils.dumps(nested_t)
if t_data is not None:
if t_data != stored_t_data:
self.stack.t.files[self.template_url] = t_data
self.stack.t.env.register_class(self.resource_type,
self.template_url,
path=self.resource_path)
return t_data
if reported_excp is None:
reported_excp = ValueError(_('Unknown error retrieving %s') %
self.template_url)
raise reported_excp
def _validate_against_facade(self, facade_cls):
facade_schemata = properties.schemata(facade_cls.properties_schema)
for n, fs in facade_schemata.items():
if fs.required and n not in self.properties_schema:
msg = (_("Required property %(n)s for facade %(type)s "
"missing in provider") % {'n': n, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)
ps = self.properties_schema.get(n)
if (n in self.properties_schema and
(fs.allowed_param_prop_type() != ps.type)):
# Type mismatch
msg = (_("Property %(n)s type mismatch between facade %(type)s"
" (%(fs_type)s) and provider (%(ps_type)s)") % {
'n': n, 'type': self.type(),
'fs_type': fs.type, 'ps_type': ps.type})
raise exception.StackValidationFailed(message=msg)
for n, ps in self.properties_schema.items():
if ps.required and n not in facade_schemata:
# Required property for template not present in facade
msg = (_("Provider requires property %(n)s "
"unknown in facade %(type)s") % {
'n': n, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)
facade_attrs = facade_cls.attributes_schema.copy()
facade_attrs.update(facade_cls.base_attributes_schema)
for attr in facade_attrs:
if attr not in self.attributes_schema:
msg = (_("Attribute %(attr)s for facade %(type)s "
"missing in provider") % {
'attr': attr, 'type': self.type()})
raise exception.StackValidationFailed(message=msg)
def validate(self):
# Calls validate_template()
result = super(TemplateResource, self).validate()
try:
self.template_data()
except ValueError as ex:
msg = _("Failed to retrieve template data: %s") % ex
raise exception.StackValidationFailed(message=msg)
# If we're using an existing resource type as a facade for this
# template, check for compatibility between the interfaces.
try:
fri = self.stack.env.get_resource_info(
self.type(),
resource_name=self.name,
ignore=self.resource_info)
except exception.EntityNotFound:
pass
else:
facade_cls = fri.get_class(files=self.stack.t.files)
self._validate_against_facade(facade_cls)
return result
def validate_template(self):
if self.validation_exception is not None:
msg = str(self.validation_exception)
raise exception.StackValidationFailed(message=msg)
return super(TemplateResource, self).validate_template()
def handle_adopt(self, resource_data=None):
return self.create_with_template(self.child_template(),
self.child_params(),
adopt_data=resource_data)
def handle_create(self):
return self.create_with_template(self.child_template(),
self.child_params())
def metadata_update(self, new_metadata=None):
"""Refresh the metadata if new_metadata is None."""
if new_metadata is None:
self.metadata_set(self.t.metadata())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
self.properties = json_snippet.properties(self.properties_schema,
self.context)
return self.update_with_template(self.child_template(),
self.child_params())
def get_reference_id(self):
if self.resource_id is None:
return str(self.name)
if STACK_ID_OUTPUT in self.attributes.cached_attrs:
return self.attributes.cached_attrs[STACK_ID_OUTPUT]
stack_identity = self.nested_identifier()
reference_id = stack_identity.arn()
try:
if self._outputs is not None:
reference_id = self.get_output(STACK_ID_OUTPUT)
elif STACK_ID_OUTPUT in self.attributes:
output = self.rpc_client().show_output(self.context,
dict(stack_identity),
STACK_ID_OUTPUT)
if rpc_api.OUTPUT_ERROR in output:
raise exception.TemplateOutputError(
resource=self.name,
attribute=STACK_ID_OUTPUT,
message=output[rpc_api.OUTPUT_ERROR])
reference_id = output[rpc_api.OUTPUT_VALUE]
except exception.TemplateOutputError as err:
LOG.info('%s', err)
except exception.NotFound:
pass
self.attributes.set_cached_attr(STACK_ID_OUTPUT, reference_id)
return reference_id
def get_attribute(self, key, *path):
if self.resource_id is None:
return None
# first look for explicit resource.x.y
if key.startswith('resource.'):
return grouputils.get_nested_attrs(self, key, False, *path)
# then look for normal outputs
try:
return attributes.select_from_attribute(self.get_output(key),
path)
except exception.NotFound:
raise exception.InvalidTemplateAttribute(resource=self.name,
key=key)
|
openstack/heat
|
heat/engine/resources/template_resource.py
|
Python
|
apache-2.0
| 14,723 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GZTAN dataset."""
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@misc{tzanetakis_essl_cook_2001,
author = "Tzanetakis, George and Essl, Georg and Cook, Perry",
title = "Automatic Musical Genre Classification Of Audio Signals",
url = "http://ismir2001.ismir.net/pdf/tzanetakis.pdf",
publisher = "The International Society for Music Information Retrieval",
year = "2001"
}
"""
_DESCRIPTION = """
The dataset consists of 1000 audio tracks each 30 seconds long.
It contains 10 genres, each represented by 100 tracks.
The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
The genres are:
* blues
* classical
* country
* disco
* hiphop
* jazz
* metal
* pop
* reggae
* rock
"""
_DOWNLOAD_URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
_HOMEPAGE_URL = "http://marsyas.info/index.html"
_CLASS_LABELS = [
"blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop",
"reggae", "rock"
]
class GTZAN(tfds.core.GeneratorBasedBuilder):
"""GTZAN Dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"audio": tfds.features.Audio(file_format="wav", sample_rate=22050),
"label": tfds.features.ClassLabel(names=_CLASS_LABELS),
"audio/filename": tfds.features.Text(),
}),
supervised_keys=("audio", "label"),
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
dl_paths = dl_manager.download_and_extract({"genres": _DOWNLOAD_URL})
path = os.path.join(dl_paths["genres"], "genres")
# There is no predefined train/val/test split for this dataset.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN, gen_kwargs={"path": path}),
]
def _generate_examples(self, path):
"""Yields examples.
Args:
path: Path of the downloaded and extracted directory
Yields:
Next examples
"""
for root, _, file_name in tf.io.gfile.walk(path):
for fname in file_name:
if fname.endswith(".wav"): # select only .wav files
# Each .wav file has name in the format of <genre>.<number>.wav
label = fname.split(".")[0]
key = fname
example = {
"audio": os.path.join(root, fname),
"label": label,
"audio/filename": fname,
}
yield key, example
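# Usage sketch (assumes tensorflow-datasets is installed and the download
# URL above is still reachable); loads the single TRAIN split defined in
# _split_generators:
#   ds = tfds.load("gtzan", split="train")
#   for example in ds.take(1):
#       print(example["label"], example["audio"].shape)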
|
tensorflow/datasets
|
tensorflow_datasets/audio/gtzan/gtzan.py
|
Python
|
apache-2.0
| 3,225 |
##################################################
#
# Imports
#
#################################################
import subprocess
import shlex
import packet
##################################################
#
# Constants
#
#################################################
# General constants
HEADER = "tshark -r"
PROTOCOL = ''' -nn -T fields -R 'http || sip || msrp' '''
FILTRE = """ -e frame.protocols""" #0
FILTRE = FILTRE + """ -e frame.number""" #1
FILTRE = FILTRE + """ -e frame.time_delta""" #2
FILTRE = FILTRE + """ -e ip.src """ #3
FILTRE = FILTRE + """ -e ip.dst """ #4
FILTRE = FILTRE + """ -e sip.Method """ #5
FILTRE = FILTRE + """ -e sip.Status-Code""" #6
FILTRE = FILTRE + """ -e msrp.method """ #7
FILTRE = FILTRE + """ -e msrp.data """ #8
FILTRE = FILTRE + """ -e msrp.status.code""" #9
FILTRE = FILTRE + """ -e diameter.cmd.code """ #10
array_color_http=['blue','orange']
array_color_msrp=['blue','orange']
array_color_diameter=['blue','orange']
array_color_sip=['blue','orange']
all_packets = []
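# For orientation (illustrative, assuming file = '/tmp/capture.cap'), the
# command assembled in extract() below is roughly:
#   tshark -r /tmp/capture.cap -nn -T fields -R 'http || sip || msrp' \
#       -e frame.protocols -e frame.number -e frame.time_delta -e ip.src ...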
##################################################
#
# Functions
#
#################################################
def extract(file):
output = subprocess.check_output(shlex.split(HEADER+ file+PROTOCOL+FILTRE))
lines = output.splitlines()
for i in lines:
attr_list = i.split('\t')
if ('sip' in attr_list[0] ):
print (attr_list[0])
newpacket = packet.Sip_packet(attr_list[0],attr_list[1],attr_list[2],attr_list[3],attr_list[4],attr_list[5],attr_list[6])
print(newpacket.ip_src)
print(newpacket.method)
print(newpacket.request_name)
##################################################
#
# Main
#
#################################################
print ('begin')
file ='/tmp/capture.cap'
extract(file)
print ('end')
|
fabthefabulousfab/script_plantuml
|
extract_tshark.py
|
Python
|
apache-2.0
| 1,810 |
"""Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect ('Password:')
child.sendline (mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, Shane Kerr and Thomas Kluyver. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Pexpect Copyright (c) 2010 Noah Spurrier
http://pexpect.sourceforge.net/
"""
try:
import os, sys, time
import select
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
except ImportError, e:
raise ImportError (str(e) + """
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.""")
__version__ = '2.5.1'
version = __version__
version_info = (2,5,1)
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnb', 'run', 'which',
'split_command_line', '__version__']
# Exception classes used by this module.
class ExceptionPexpect(Exception):
"""Base class for all exceptions raised by this module.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
"""This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. """
tblist = traceback.extract_tb(sys.exc_info()[2])
#tblist = filter(self.__filter_not_pexpect, tblist)
tblist = [item for item in tblist if self.__filter_not_pexpect(item)]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
def __filter_not_pexpect(self, trace_list_item):
"""This returns True if list item 0 the string 'pexpect.py' in it. """
if trace_list_item[0].find('pexpect.py') == -1:
return True
else:
return False
class EOF(ExceptionPexpect):
"""Raised when EOF is read from a child. This usually means the child has exited."""
class TIMEOUT(ExceptionPexpect):
"""Raised when a read time exceeds the timeout. """
##class TIMEOUT_PATTERN(TIMEOUT):
## """Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## """
##class MAXBUFFER(ExceptionPexpect):
## """Raised when a scan buffer fills before matching an expected pattern."""
PY3 = (sys.version_info[0] >= 3)
def _cast_bytes(s, enc):
if isinstance(s, unicode):
return s.encode(enc)
return s
def _cast_unicode(s, enc):
if isinstance(s, bytes):
return s.decode(enc)
return s
re_type = type(re.compile(''))
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
"""
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudo ttys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect ('(?i)password')
child.sendline (mypassword)
The previous code can be replaced with the following::
from pexpect import *
run ('scp foo [email protected]:.', events={'(?i)password': mypassword})
Examples
========
Start the apache daemon on the local machine::
from pexpect import *
run ("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run ("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)
Tricky Examples
===============
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run ("ssh [email protected] 'ls -l'", events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run ("mencoder dvd://1 -o video.avi -oac copy -ovc copy", events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
Whenever one of the patterns is seen in the command output, run() will send the
associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
functions. A callback is a function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
return True to stop the current run process; otherwise run() continues until
the next event. A callback may also return a string which will be sent to
the child. 'extra_args' is not used directly by run(). It provides a way to
pass data to a callback function through the locals dictionary passed to
the callback."""
if timeout == -1:
child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
encoding=encoding)
else:
child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env, encoding=encoding)
if events is not None:
patterns = events.keys()
responses = events.values()
else:
patterns=None # We assume that EOF or TIMEOUT will save us.
responses=None
child_result_list = []
event_count = 0
while 1:
try:
index = child.expect (patterns)
if isinstance(child.after, basestring):
child_result_list.append(child.before + child.after)
else: # child.after may have been a TIMEOUT or EOF, so don't cat those.
child_result_list.append(child.before)
if isinstance(responses[index], basestring):
child.send(responses[index])
elif type(responses[index]) is types.FunctionType:
callback_result = responses[index](locals())
sys.stdout.flush()
if isinstance(callback_result, basestring):
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError ('The callback must be a string or function type.')
event_count = event_count + 1
except TIMEOUT, e:
child_result_list.append(child.before)
break
except EOF, e:
child_result_list.append(child.before)
break
child_result = child._empty_buffer.join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
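# Illustrative sketch (hypothetical host, password and values, not part of the
# original module): using run() with an 'events' mapping as described in the
# docstring above. A string response is sent when its pattern matches; a
# callback may return True to stop, or a string to send to the child.
def _example_run_with_events():
    def print_tick(d):
        # 'd' is the locals() of run(); returning None lets run() continue.
        print(d['event_count'])
    output = run("ssh user@example.com 'ls -l'",
                 events={'(?i)password': 'secret\n', TIMEOUT: print_tick},
                 timeout=5)
    return output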
class spawnb(object):
"""Use this class to start and control child applications with a pure-bytes
interface."""
_buffer_type = bytes
def _cast_buffer_type(self, s):
return _cast_bytes(s, self.encoding)
_empty_buffer = b''
_pty_newline = b'\r\n'
# Some code needs this to exist, but it's mainly for the spawn subclass.
encoding = 'utf-8'
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None):
"""This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn ('/usr/bin/ftp')
child = pexpect.spawn ('/usr/bin/ssh [email protected]')
child = pexpect.spawn ('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn ('/usr/bin/ftp', [])
child = pexpect.spawn ('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn ('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (>, |, or *). This is a common mistake.
If you want to run a command and pipe it through another command then
you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > log_list.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is the maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
The searchwindowsize attribute sets how far back in the incoming
search buffer Pexpect will search for pattern matches. Every time
Pexpect reads some data from the child it will append the data to the
incoming buffer. The default is to search from the beginning of the
incoming buffer each time new data is read from the child. But this is
very inefficient if you are running a command that generates a large
amount of data where you only want to match near the end. The
searchwindowsize does not affect the size of the incoming data buffer.
You will still have access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.WTERMSIG. """
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
self.status = None # status returned by os.waitpid
self.flag_eof = False
self.pid = None
self.child_fd = -1 # initially closed
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
self.logfile_read = None # input from child (read_nonblocking)
self.logfile_send = None # output to send (send, sendline)
self.maxread = maxread # max bytes to read at one time into buffer
self.buffer = self._empty_buffer # This is the read buffer. See maxread.
self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
# Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
self.softspace = False # File-like object.
self.name = '<' + repr(self) + '>' # File-like object.
self.closed = True # File-like object.
self.cwd = cwd
self.env = env
self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
# Solaris uses internal __fork_pty(). All others use pty.fork().
if 'solaris' in sys.platform.lower() or 'sunos5' in sys.platform.lower():
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# allow dummy instances for subclasses that may not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn (command, args)
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. """
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
# -- Fernando Perez
try:
self.close()
except:
pass
def __str__(self):
"""This returns a human-readable string that represents the state of
the object. """
s = []
s.append(repr(self))
s.append('version: ' + __version__)
s.append('command: ' + str(self.command))
s.append('args: ' + str(self.args))
s.append('searcher: ' + str(self.searcher))
s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
s.append('before (last 100 chars): ' + str(self.before)[-100:])
s.append('after: ' + str(self.after))
s.append('match: ' + str(self.match))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self,command,args=[]):
"""This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. """
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if type(command) == type(0):
raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')
if type (args) != type([]):
raise TypeError ('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
self.args = args[:] # work with a copy
self.args.insert (0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join (self.args) + '>'
assert self.pid is None, 'The pid member should be None.'
assert self.command is not None, 'The command member should not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError, e:
raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
else: # Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0: # Child
try:
self.child_fd = sys.stdout.fileno() # used by setwinsize()
self.setwinsize(24, 80)
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range (3, max_fd):
try:
os.close (i)
except OSError:
pass
# I don't know why this works, but ignoring SIGHUP fixes a
# problem when trying to start a Java daemon with sudo
# (specifically, Tomcat).
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
"""This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
"""
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect, "Error! Could not open pty with os.openpty()."
pid = os.fork()
if pid < 0:
raise ExceptionPexpect, "Error! Failed os.fork()."
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
"""This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. """
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
if fd >= 0:
os.close(fd)
raise ExceptionPexpect, "Error! Failed to disconnect from controlling tty. It is still possible to open /dev/tty."
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR);
if fd < 0:
raise ExceptionPexpect, "Error! Could not open child pty, " + child_name
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect, "Error! Could not open controlling tty, /dev/tty"
else:
os.close(fd)
def fileno (self): # File-like object.
"""This returns the file descriptor of the pty for the child.
"""
return self.child_fd
def close (self, force=True): # File-like object.
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). """
if not self.closed:
self.flush()
os.close (self.child_fd)
time.sleep(self.delayafterclose) # Give kernel time to update process status.
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect ('close() could not terminate the child using terminate()')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush (self): # File-like object.
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty (self): # File-like object.
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. """
return os.isatty(self.child_fd)
def waitnoecho (self, timeout=-1):
"""This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn ('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
If timeout==None then this method will block until the ECHO flag is False.
"""
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho (self):
"""This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). """
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho (self, state):
"""This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.expect (['1234'])
p.expect (['1234'])
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['abcd'])
p.expect (['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
p.setecho(False) # Turn off tty echo
p.sendline ('abcd') # We will see this only once (echoed by cat).
p.sendline ('wxyz') # We will see this only once (echoed by cat).
p.expect (['1234'])
p.expect (['1234'])
p.expect (['abcd'])
p.expect (['wxyz'])
"""
self.child_fd
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
# and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def read_nonblocking (self, size = 1, timeout = -1):
"""This reads at most size bytes from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a logfile was set (see the
logfile attribute) then all data will also be written to the log file.
If timeout is None then the read may block indefinitely. If timeout is -1
then the self.timeout value is used. If timeout is 0 then the child is
polled and if there was no data immediately ready then this will raise
a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. """
if self.closed:
raise ValueError ('I/O operation on closed file in read_nonblocking().')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
if not r:
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
elif self.__irix_hack:
# This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
# This adds a 2 second delay, but only when the child is terminated.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')
r,w,e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their processes are alive;
# then timeout on the select; and then finally admit that they are not alive.
self.flag_eof = True
raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
else:
raise TIMEOUT ('Timeout exceeded in read_nonblocking().')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError, e: # Linux does this
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
if s == b'': # BSD style
self.flag_eof = True
raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')
s2 = self._cast_buffer_type(s)
if self.logfile is not None:
self.logfile.write(s2)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write(s2)
self.logfile_read.flush()
return s
raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
def read (self, size = -1): # File-like object.
"""This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. """
if size == 0:
return self._empty_buffer
if size < 0:
self.expect (self.delimiter) # delimiter default is EOF
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
# I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
if self._buffer_type is bytes:
pat = (u'.{%d}' % size).encode('ascii')
else:
pat = u'.{%d}' % size
cre = re.compile(pat, re.DOTALL)
index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.after ### self.before should be ''. Should I assert this?
return self.before
def readline(self, size = -1):
"""This reads and returns one entire line. A trailing newline is kept
in the string, but may be absent when a file ends with an incomplete
line. Note: This readline() looks for a \\r\\n pair even on UNIX
because this is what the pseudo tty device returns. So contrary to what
you may expect you will receive the newline as \\r\\n. An empty string
is returned when EOF is hit immediately. Currently, the size argument is
mostly ignored, so this behavior is not standard for a file-like
object. If size is 0 then an empty string is returned. """
if size == 0:
return self._empty_buffer
index = self.expect ([self._pty_newline, self.delimiter]) # delimiter default is EOF
if index == 0:
return self.before + self._pty_newline
return self.before
def __iter__ (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
return self
def next (self): # File-like object.
"""This is to support iterators over a file-like object.
"""
result = self.readline()
if result == self._empty_buffer:
raise StopIteration
return result
def readlines (self, sizehint = -1): # File-like object.
"""This reads until EOF using readline() and returns a list containing
the lines thus read. The optional "sizehint" argument is ignored. """
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s): # File-like object.
"""This is similar to send() except that there is no return value.
"""
self.send (s)
def writelines (self, sequence): # File-like object.
"""This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
"""
for s in sequence:
self.write (s)
def send(self, s):
"""This sends a string to the child process. This returns the number of
bytes written. If a log file was set then the data is also written to
the log. """
time.sleep(self.delaybeforesend)
s2 = self._cast_buffer_type(s)
if self.logfile is not None:
self.logfile.write(s2)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write(s2)
self.logfile_send.flush()
c = os.write (self.child_fd, _cast_bytes(s, self.encoding))
return c
def sendline(self, s=''):
"""This is like send(), but it adds a line feed (os.linesep). This
returns the number of bytes written. """
n = self.send (s)
n = n + self.send (os.linesep)
return n
def sendcontrol(self, char):
"""This sends a control character to the child such as Ctrl-C or
Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
child.sendcontrol('g')
See also, sendintr() and sendeof().
"""
char = char.lower()
a = ord(char)
if a>=97 and a<=122:
a = a - ord('a') + 1
return self.send (chr(a))
d = {'@':0, '`':0,
'[':27, '{':27,
'\\':28, '|':28,
']':29, '}': 29,
'^':30, '~':30,
'_':31,
'?':127}
if char not in d:
return 0
return self.send (chr(d[char]))
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. """
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write (self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write (self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
else:
# platform does not define VEOF so assume CTRL-D
char = chr(4)
self.send(char)
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
if hasattr(termios, 'VINTR'):
char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
else:
# platform does not define VINTR so assume CTRL-C
char = chr(3)
self.send (char)
def eof (self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then it moves on to SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. """
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError, e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(); but, technically, the child
is still alive until its output is read. """
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect ('Cannot wait for dead child process.')
self.exitstatus = os.WEXITSTATUS(status)
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. """
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form of waitpid to get
# status of a defunct process. This is super-lame. The flag_eof would have
# been set in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e: # No child processes
if e.errno == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# I have to do this twice for Solaris. I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process wishes to
# report, and the value of status is undefined.
if pid == 0:
try:
pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
except OSError, e: # This should never happen...
if e[0] == errno.ECHILD:
raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
else:
raise e
# If pid is still 0 after two calls to waitpid() then
# the process really is alive. This seems to work on all platforms, except
# for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
# take care of this situation (unfortunately, this requires waiting through the timeout).
if pid == 0:
return True
if os.WIFEXITED (status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED (status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED (status):
raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
return False
def kill(self, sig):
"""This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. """
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def compile_pattern_list(self, patterns):
"""This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoids calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(cpl, timeout)
...
"""
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
compile_flags = re.DOTALL # Allow dot to match \n
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for p in patterns:
if isinstance(p, (bytes, unicode)):
p = self._cast_buffer_type(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif type(p) is re_type:
p = self._prepare_regex_pattern(p)
compiled_pattern_list.append(p)
else:
raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those types. %s' % str(type(p)))
return compiled_pattern_list
def _prepare_regex_pattern(self, p):
"Recompile unicode regexes as bytes regexes. Overridden in subclass."
if isinstance(p.pattern, unicode):
p = re.compile(p.pattern.encode('utf-8'), p.flags &~ re.UNICODE)
return p
def expect(self, pattern, timeout = -1, searchwindowsize=-1):
"""This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first match
in the stream is chosen. If more than one pattern matches at that point,
the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect (['bar', 'foo', 'foobar'])
# returns 1 ('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect (['foobar', 'foo'])
# returns 0 ('foobar') if all input is available at once,
# but returns 1 ('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect (['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect (pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
"""
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT (which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. """
return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):
"""This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match."""
if isinstance(pattern_list, (bytes, unicode)) or pattern_list in (TIMEOUT, EOF):
pattern_list = [pattern_list]
return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):
"""This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and what
to search for in the input.
See expect() for other arguments, return value and exceptions. """
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True: # Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end : ]
self.before = incoming[ : searcher.start]
self.after = incoming[searcher.start : searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if timeout is not None and timeout < 0:
raise TIMEOUT ('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking (self.maxread, timeout)
freshlen = len(c)
time.sleep (0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF, e:
self.buffer = self._empty_buffer
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF (str(e) + '\n' + str(self))
except TIMEOUT, e:
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT (str(e) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, r, c):
"""This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. """
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
TIOCSWINSZ = -2146929561 # Same bits, but with sign.
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', r, c, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
def interact(self, escape_character = b'\x1d', input_filter = None, output_filter = None):
"""This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echos the child stdout and child stderr to the real stdout and
it echos the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
the ESC character. ASCII 29 was chosen for historical merit because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
"""
# Flush the buffer.
if PY3: self.stdout.write(_cast_unicode(self.buffer, self.encoding))
else: self.stdout.write(self.buffer)
self.stdout.flush()
self.buffer = self._empty_buffer
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
"""This is used by the interact() method.
"""
while data != b'' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
"""This is used by the interact() method.
"""
return os.read(fd, 1000)
def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):
"""This is used by the interact() method.
"""
while self.isalive():
r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
data = self.__interact_read(self.child_fd)
if output_filter: data = output_filter(data)
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter: data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select (self, iwtd, owtd, ewtd, timeout=None):
"""This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). """
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select (iwtd, owtd, ewtd, timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
# if we loop back we have to subtract the amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([],[],[])
else: # something else caused the select.error, so this really is an exception
raise
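# Illustrative sketch (hypothetical usage, not part of the original module):
# the spawnb class above exposes the same interface as spawn, but its buffer,
# before/after attributes and expect() patterns are bytes rather than unicode.
def _example_spawnb_usage():
    child = spawnb('/bin/ls', ['-l', '/tmp'])
    child.expect(EOF)        # wait for the child to finish
    listing = child.before   # bytes accumulated before EOF
    child.close()            # populates exitstatus / signalstatus
    return listing, child.exitstatus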
class spawn(spawnb):
"""This is the main class interface for Pexpect. Use this class to start
and control child applications."""
_buffer_type = unicode
def _cast_buffer_type(self, s):
return _cast_unicode(s, self.encoding)
_empty_buffer = u''
_pty_newline = u'\r\n'
def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None,
logfile=None, cwd=None, env=None, encoding='utf-8'):
super(spawn, self).__init__(command, args, timeout=timeout, maxread=maxread,
searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
self.encoding = encoding
def _prepare_regex_pattern(self, p):
"Recompile bytes regexes as unicode regexes."
if isinstance(p.pattern, bytes):
p = re.compile(p.pattern.decode(self.encoding), p.flags)
return p
def read_nonblocking(self, size=1, timeout=-1):
return super(spawn, self).read_nonblocking(size=size, timeout=timeout)\
.decode(self.encoding)
read_nonblocking.__doc__ = spawnb.read_nonblocking.__doc__
##############################################################################
# End of spawn class
##############################################################################
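# Illustrative sketch (hypothetical usage, not part of the original module):
# spawn decodes child output with its 'encoding' argument, so patterns and the
# before/after attributes are unicode strings. Listing EOF and TIMEOUT among
# the patterns turns those conditions into return indexes instead of raised
# exceptions.
def _example_spawn_usage():
    child = spawn('/bin/cat', encoding='utf-8')
    child.sendline(u'hello')
    index = child.expect([u'hello', EOF, TIMEOUT], timeout=5)
    child.sendeof()
    child.close()
    return index, child.before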
class searcher_string (object):
"""This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
"""
def __init__(self, strings):
"""This creates an instance of searcher_string. This argument 'strings'
may be a list; a sequence of strings; or the EOF or TIMEOUT types. """
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (ns[0],' %d: "%s"' % ns) for ns in self._strings ]
ss.append((-1,'searcher_string:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
return '\n'.join(a[1] for a in ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. """
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen+len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and n < first_match:
first_match = n
best_index, best_match = index, s
if first_match == absurd_match:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
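# --- Editor's illustrative sketch (not part of pexpect) ----------------------
# A hedged usage example for searcher_string, mirroring how the docstrings
# above describe search(): the whole buffer is passed together with the number
# of freshly read characters. The buffer contents and pattern list are invented
# for illustration.
def _searcher_string_example():
    s = searcher_string(['OK', 'ERROR', TIMEOUT])
    buf = 'boot sequence... OK\r\n'
    idx = s.search(buf, freshlen=len(buf))
    # idx == 0 ('OK'); s.start, s.end and s.match describe the hit.
    return idx, buf[s.start:s.end]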
class searcher_re (object):
"""This is regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the re.match object returned by a successful re.search
"""
def __init__(self, patterns):
"""This creates an instance that searches for 'patterns' Where
'patterns' may be a list or other sequence of compiled regular
expressions, or the EOF or TIMEOUT types."""
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in enumerate(patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
"""This returns a human-readable string that represents the state of
the object."""
ss = [ (n,' %d: re.compile("%s")' % (n,str(s.pattern))) for n,s in self._searches]
ss.append((-1,'searcher_re:'))
if self.eof_index >= 0:
ss.append ((self.eof_index,' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append ((self.timeout_index,' %d: TIMEOUT' % self.timeout_index))
ss.sort()
return '\n'.join(a[1] for a in ss)
def search(self, buffer, freshlen, searchwindowsize=None):
"""This searches 'buffer' for the first occurence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1."""
absurd_match = len(buffer)
first_match = absurd_match
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer)-searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if n < first_match:
first_match = n
the_match = match
best_index = index
if first_match == absurd_match:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
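# --- Editor's illustrative sketch (not part of pexpect) ----------------------
# A hedged usage example for searcher_re; as the docstring above notes, the
# patterns must already be compiled. The prompt strings are invented.
def _searcher_re_example():
    import re
    s = searcher_re([re.compile('login: '), re.compile('Password: '), EOF])
    buf = 'Ubuntu 12.04\r\nlogin: '
    idx = s.search(buf, freshlen=len(buf))
    # idx == 0; s.match is the underlying re match object.
    return idx, s.match.group(0)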
def which (filename):
"""This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None."""
# Special case where filename already contains a path.
if os.path.dirname(filename) != '':
if os.access (filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
f = os.path.join(path, filename)
if os.access(f, os.X_OK):
return f
return None
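# --- Editor's illustrative sketch (not part of pexpect) ----------------------
# A hedged example of which(); the result is system dependent, and 'ls' is only
# an illustrative lookup.
def _which_example():
    return which('ls')  # e.g. '/bin/ls' on most Unix systems, or None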
def split_command_line(command_line):
"""This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. """
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
state_whitespace = 4 # The state of consuming whitespace between commands.
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\': # Escape the next character
state = state_esc
elif c == r"'": # Handle single quote
state = state_singlequote
elif c == r'"': # Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
pass # Do nothing.
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
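# --- Editor's illustrative sketch (not part of pexpect) ----------------------
# A hedged example of the state machine above; the expected values were traced
# by hand through the parser, not taken from pexpect's documentation.
def _split_command_line_example():
    assert split_command_line('ls -l "My Documents"') == ['ls', '-l', 'My Documents']
    assert split_command_line('echo a\\ b') == ['echo', 'a b']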
# vi:set sr et ts=4 sw=4 ft=python :
|
mikewesner-wf/glasshouse
|
appengine/lib/invoke/vendor/pexpect/__init__.py
|
Python
|
apache-2.0
| 78,307 |
#### DB SETUP ####
from pocket_change.database import SQLAlchemyWrapperProxy
sqlalchemy_db = SQLAlchemyWrapperProxy()
from sneeze.database.models import add_models as sneeze_models
from pocket.database import add_models as pocket_models
model_adders = [sneeze_models, pocket_models]
#### APP CONFIG ####
from flask import Flask, render_template, request, session
class AppContextClass(Flask.app_ctx_globals_class):
@property
def jira(self):
try:
return self._jira
except AttributeError:
self._jira = JiraClient({'server' : app.config['JIRA_HOST']})
return self._jira
Flask.app_ctx_globals_class = AppContextClass
app = Flask(__name__.split('.')[0])
#app.config.from_envvar('POCKET_CHANGE_CONFIG')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////Users/smoke/sneeze//kaichu.sqlite'
app.config['JIRA_HOST'] = 'http://localhost:48080'
app.config['JIRA_AUTH'] = 'qa_reporter;password'
app.debug = True
app.secret_key = 'why would I tell you my secret key?'
##### KAICHU ####
from pocket_change.jira_lib import Client as JiraClient
try:
from kaichu.models import add_models as kaichu_models
except ImportError:
kaichu_models = None
if (JiraClient and kaichu_models
and app.config.get('JIRA_HOST', False)
and app.config.get('JIRA_APP_KEY', False)
and app.config.get('JIRA_RSA_KEY_FILE', False)):
app.config['KAICHU_ENABLED'] = True
model_adders.append(kaichu_models)
else:
app.config['KAICHU_ENABLED'] = False
#### DB INIT ####
sqlalchemy_db.make(app, *model_adders)
#### AUTH ####
from pocket_change.auth import login_manager
login_manager.init_app(app)
#### REST ####
from pocket_change.rest import api
from pocket_change.rest.components import load as load_rest_components
load_rest_components(app)
app.register_blueprint(api, url_prefix='/rest')
#### GUI ####
from pocket_change.ui import load as load_ui, core as core_ui
load_ui(app)
app.register_blueprint(core_ui)
if app.config['KAICHU_ENABLED']:
from pocket_change.ui import kaichu as kaichu_ui
app.register_blueprint(kaichu_ui)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
psusloparov/sneeze
|
pocket_change/pocket_change/__init__.py
|
Python
|
apache-2.0
| 2,161 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import unittest2
from quantum.db import api as db
from quantum.db import models_v2
from quantum.openstack.common import cfg
from quantum.plugins.ryu.common import config
from quantum.plugins.ryu.db import api_v2 as db_api_v2
from quantum.plugins.ryu.db import models_v2 as ryu_models_v2
from quantum.plugins.ryu import ofp_service_type
class RyuDBTest(unittest2.TestCase):
def setUp(self):
options = {"sql_connection": 'sqlite:///:memory:'}
options.update({'base': models_v2.model_base.BASEV2})
reconnect_interval = cfg.CONF.DATABASE.reconnect_interval
options.update({"reconnect_interval": reconnect_interval})
db.configure_db(options)
self.hosts = [(cfg.CONF.OVS.openflow_controller,
ofp_service_type.CONTROLLER),
(cfg.CONF.OVS.openflow_rest_api,
ofp_service_type.REST_API)]
db_api_v2.set_ofp_servers(self.hosts)
def tearDown(self):
db.clear_db()
cfg.CONF.reset()
def test_ofp_server(self):
session = db.get_session()
servers = session.query(ryu_models_v2.OFPServer).all()
print servers
self.assertEqual(len(servers), 2)
for s in servers:
self.assertTrue((s.address, s.host_type) in self.hosts)
|
ruijie/quantum
|
quantum/tests/unit/ryu/test_ryu_db.py
|
Python
|
apache-2.0
| 2,027 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kolla.template import methods
from kolla.tests import base
class MethodsTest(base.TestCase):
def test_debian_package_install(self):
packages = ['https://packages.debian.org/package1.deb', 'package2.deb']
result = methods.debian_package_install(packages)
expectCmd = 'apt-get -y install --no-install-recommends package2.deb'
self.assertEqual(expectCmd, result.split("&&")[1].strip())
def test_enable_repos_rhel(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'rhel',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_centos(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = 'RUN dnf config-manager --enable grafana || true'
self.assertEqual(expectCmd, result)
def test_enable_repos_centos_missing_repo(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['missing_repo'],
'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_centos_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana', 'ceph'],
'enable')
expectCmd = 'RUN dnf config-manager --enable grafana '
expectCmd += '--enable centos-ceph-nautilus || true'
self.assertEqual(expectCmd, result)
def test_enable_repos_debian(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana'], 'enable')
expectCmd = 'RUN echo "deb https://packages.grafana.com/oss/deb '
expectCmd += 'stable main" >/etc/apt/sources.list.d/grafana.list'
self.assertEqual(expectCmd, result)
def test_enable_repos_debian_missing_repo(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['missing_repo'],
'enable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_enable_repos_debian_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana', 'kibana'],
'enable')
expectCmd = 'RUN echo "deb https://packages.grafana.com/oss/deb '
expectCmd += 'stable main" >/etc/apt/sources.list.d/grafana.list && '
expectCmd += 'echo "deb '
expectCmd += 'https://artifacts.elastic.co/packages/oss-7.x/apt '
expectCmd += 'stable main" >/etc/apt/sources.list.d/kibana.list'
self.assertEqual(expectCmd, result)
def test_disable_repos_centos(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana'], 'disable')
expectCmd = 'RUN dnf config-manager --disable grafana || true'
self.assertEqual(expectCmd, result)
def test_disable_repos_centos_multiple(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'centos',
'base_package_type': 'rpm',
}
result = methods.handle_repos(template_vars, ['grafana', 'ceph'],
'disable')
expectCmd = 'RUN dnf config-manager --disable grafana '
expectCmd += '--disable centos-ceph-nautilus || true'
self.assertEqual(expectCmd, result)
# NOTE(hrw): there is no disabling of repos for Debian/Ubuntu
def test_disable_repos_debian(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
result = methods.handle_repos(template_vars, ['grafana'], 'disable')
expectCmd = ''
self.assertEqual(expectCmd, result)
def test_handle_repos_string(self):
template_vars = {
'base_arch': 'x86_64',
'base_distro': 'debian',
'base_package_type': 'deb'
}
self.assertRaisesRegex(TypeError,
r'First argument should be a list of '
r'repositories',
methods.handle_repos, template_vars, 'grafana',
'disable')
|
stackforge/kolla
|
kolla/tests/test_methods.py
|
Python
|
apache-2.0
| 5,853 |
#!/usr/bin/env python
from flup.server.fcgi import WSGIServer
from frontend import app
WSGIServer(app, bindAddress=app.config['FCGI_SOCKET']).run()
|
jkossen/imposter
|
examples/frontend_fcgi.py
|
Python
|
bsd-2-clause
| 149 |
import os
from flask import Flask, redirect, render_template_string, request, url_for
from flask_babel import Babel
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import confirm_email_required, current_user, login_required, \
UserManager, UserMixin, SQLAlchemyAdapter
from flask_user.signals import user_sent_invitation, user_registered
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///test_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = os.getenv('MAIL_USE_SSL', True)
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
USER_ENABLE_INVITATION = True
USER_REQUIRE_INVITATION = True
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
mail = Mail(app) # Initialize Flask-Mail
babel = Babel(app) # Initialize Flask-Babel
@babel.localeselector
def get_locale():
translations = [str(translation) for translation in babel.list_translations()]
language = request.accept_languages.best_match(translations)
print('translations=',repr(translations), 'language=', repr(language))
return language
# Define the User data model. Make sure to add flask.ext.user UserMixin !!!
class User(db.Model, UserMixin):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=True, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime(), nullable=True)
# User information
is_enabled = db.Column(db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
class UserInvitation(db.Model):
__tablename__ = 'user_invite'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False)
# save the id of the user who sent the invitation
invited_by_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# token used for registration page to identify user registering
token = db.Column(db.String(100), nullable=False, server_default='')
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserInvitationClass=UserInvitation) # Select database adapter
user_manager = UserManager(db_adapter, app) # Init Flask-User and bind to app
@user_registered.connect_via(app)
def after_registered_hook(sender, user, user_invite):
sender.logger.info("USER REGISTERED")
@user_sent_invitation.connect_via(app)
def after_invitation_hook(sender, **extra):
sender.logger.info("USER SENT INVITATION")
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Home Page{%endtrans%}</h2>
{% if current_user.is_authenticated %}
<p> <a href="{{ url_for('user_profile_page') }}">
{%trans%}Profile Page{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% else %}
<p> <a href="{{ url_for('user.login') }}">
{%trans%}Sign in or Register{%endtrans%}</a></p>
{% endif %}
{% endblock %}
""")
if current_user.is_authenticated:
return redirect(url_for('user_profile_page'))
else:
return redirect(url_for('user.login'))
# The Profile page requires a logged-in user
@app.route('/user/profiles')
@login_required # Use of @login_required decorator
@confirm_email_required
def user_profile_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
<p> <a href="{{ url_for('home_page') }}">
{%trans%}Home Page{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_username') }}">
{%trans%}Change username{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.invite') }}">
{%trans%}Invite User{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
|
jamescarignan/Flask-User
|
example_apps/invite_app.py
|
Python
|
bsd-2-clause
| 6,755 |
# -*- coding: utf-8 -*-
import logging
from django.db import models
from system.official_account.models import OfficialAccount
from system.rule.models import Rule
logger_rule_match = logging.getLogger(__name__)
class RuleMatchManager(models.Manager):
"""
Manager for the WeChat rule reply table
"""
def add(self, rule, plugin_iden, reply_id=0, order=0, status=True):
"""
Add a WeChat rule reply.
"""
rule_match = super(RuleMatchManager, self).create(
official_account=rule.official_account,
rule=rule,
plugin_iden=plugin_iden,
reply_id=reply_id,
order=order,
status=status
)
logger_rule_match.info('New rule_match created [Detail] %s' % rule_match.__dict__)
return rule_match
def get(self, rule):
"""
Return the corresponding QuerySet for the given rule.
The returned set is already ordered by priority, with disabled matches filtered out.
"""
return super(RuleMatchManager, self).get_queryset().filter(
official_account=rule.official_account
).filter(
rule=rule
).filter(
status=True
).order_by(
'-order', 'id'
)
def get_news(self, news_id):
"""
Return all matching news (rich-media) entries in the table for the given news_id.
:param news_id: ID of the news reply
"""
return super(RuleMatchManager, self).get_queryset().filter(
plugin_iden='news'
).filter(
reply_id=news_id
)
class RuleMatch(models.Model):
"""
WeChat rule match table
"""
official_account = models.ForeignKey(OfficialAccount, verbose_name=u'所属公众号')
rule = models.ForeignKey(Rule, verbose_name=u'所属规则')
plugin_iden = models.CharField(u'响应插件标识符', max_length=50)
reply_id = models.PositiveIntegerField(u'响应ID号', default=0)
order = models.PositiveIntegerField(u'优先级', default=0)
status = models.BooleanField(u'是否启用', default=True)
objects = models.Manager()
manager = RuleMatchManager()
class Meta:
verbose_name = u'微信规则匹配表'
verbose_name_plural = u'微信规则匹配表'
db_table = 'wechat_rule_match'
def __unicode__(self):
return self.plugin_iden
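# --- Editor's illustrative sketch (not part of the application) --------------
# A hedged example of how the custom manager documented above is meant to be
# driven; 'rule' and 'news_id' are assumed to be supplied by the caller.
def _rule_match_usage_example(rule, news_id):
    enabled = RuleMatch.manager.get(rule)                # enabled matches, priority-ordered
    news_matches = RuleMatch.manager.get_news(news_id)   # all matches for a news reply
    return enabled, news_matches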
|
doraemonext/wechat-platform
|
wechat_platform/system/rule_match/models.py
|
Python
|
bsd-2-clause
| 2,385 |
# Copyright 2008-2009 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import os
import re
import sys
import gtk
from application import application
from base_window import BaseWindow
from library_editor import LibraryEditor
from notebook import LibraryFile, NotebookFile, WorksheetFile
from window_builder import WindowBuilder
from worksheet_editor import WorksheetEditor
class BaseNotebookWindow(BaseWindow):
def __init__(self, notebook):
BaseWindow.__init__(self, notebook)
self.state = application.state.get_notebook_state(notebook.folder)
# We'll call window.set_default_size() later with an appropriate
# default size for the BaseNotebookWindow subclass. The size set by
# window.resize() takes precedence.
(width, height) = self.state.get_size()
if width != -1 and height != -1:
self.window.resize(width, height)
self.window.connect('configure-event', self.on_configure_event)
self.path = notebook.folder
self.editors = []
self.nb_widget = gtk.Notebook()
self.nb_widget.connect_after('switch-page', self.on_page_switched)
self.nb_widget.connect('page-reordered', self.on_page_reordered)
self._fill_contents()
self.main_vbox.show_all()
self.__initial_editor = None
open_file_paths = self.state.get_open_files()
current_file = self.state.get_current_file()
for path in open_file_paths:
if not path in self.notebook.files:
continue
file = self.notebook.files[path]
self.open_file(file)
current_file_editor = None
if current_file is not None:
filename = os.path.join(notebook.folder, current_file)
for editor in self.editors:
if editor.filename == filename:
current_file_editor = editor
if current_file_editor is None and len(self.editors) > 0:
current_file_editor = self.editors[0]
if current_file_editor is not None:
self._make_editor_current(current_file_editor)
current_file_editor.view.grab_focus()
self.__update_title()
#######################################################
# Implemented by subclasses
#######################################################
def _fill_contents(self, editor):
raise NotImplementedError()
def _add_editor(self, editor):
self.editors.append(editor)
self.nb_widget.add(editor.widget)
editor.widget._notebook_window_editor = editor
editor.connect('notify::title', self.on_editor_notify_title)
editor.connect('notify::filename', self.on_editor_notify_filename)
editor.connect('notify::modified', self.on_editor_notify_modified)
editor.connect('notify::state', self.on_editor_notify_state)
self._update_editor_title(editor)
self._update_editor_state(editor)
self._update_open_files()
def _close_editor(self, editor):
if not editor.confirm_discard():
return
if editor == self.current_editor:
# Either we'll switch page and a new editor will be set, or we have no pages left
self.current_editor = None
if editor == self.__initial_editor:
self.__initial_editor = None
self.editors.remove(editor)
editor.widget._notebook_window_editor = None
editor.close()
self.__update_title()
self._update_open_files()
self.update_sensitivity()
def _update_editor_state(self, editor):
self.update_sensitivity()
def _update_editor_title(self, editor):
if editor == self.current_editor:
self.__update_title()
#######################################################
# Overrides
#######################################################
def _add_actions(self, action_group):
BaseWindow._add_actions(self, action_group)
action_group.add_actions([
('notebook-properties', gtk.STOCK_PROPERTIES, "Notebook Prop_erties", None, None, self.on_notebook_properties),
('new-worksheet', gtk.STOCK_NEW, "_New Worksheet", "<control>n", None, self.on_new_worksheet),
('new-library', gtk.STOCK_NEW, "New _Library", "", None, self.on_new_library),
('calculate-all', gtk.STOCK_REFRESH, "Calculate _All", "<control><shift>Return", None, self.on_calculate_all),
])
def _close_current(self):
if self.current_editor:
self._close_editor(self.current_editor)
def _close_window(self):
if not self._confirm_discard():
return
BaseWindow._close_window(self)
#######################################################
# Utility
#######################################################
def _make_editor_current(self, editor):
self.nb_widget.set_current_page(self.nb_widget.page_num(editor.widget))
def __close_initial_editor(self):
if self.__initial_editor and not self.__initial_editor.filename and not self.__initial_editor.modified:
self._close_editor(self.__initial_editor)
self.__initial_editor = None
def __new_worksheet(self):
editor = WorksheetEditor(self.notebook)
self._add_editor(editor)
self._make_editor_current(editor)
return editor
def __new_library(self):
editor = LibraryEditor(self.notebook)
self._add_editor(editor)
self._make_editor_current(editor)
return editor
def __update_title(self, *args):
if self.current_editor:
title = self.current_editor.title + " - " + os.path.basename(self.notebook.folder) + " - Reinteract"
else:
title = os.path.basename(self.notebook.folder) + " - Reinteract"
self.window.set_title(title)
def _confirm_discard(self, before_quit=False):
for editor in self.editors:
if editor.modified:
# Let the user see what they are discarding or not discarding
self.window.present_with_time(gtk.get_current_event_time())
self._make_editor_current(editor)
if not editor.confirm_discard(before_quit=before_quit):
return False
return True
def _update_open_files(self):
open_file_paths = []
for child in self.nb_widget.get_children():
file = child._notebook_window_editor.file
if not file:
continue
open_file_paths.append(file.path)
self.state.set_open_files(open_file_paths)
def _update_current_file(self):
file = self.current_editor.file
if file is not None:
self.state.set_current_file(file.path)
else:
self.state.set_current_file(None)
def _update_size(self, width, height):
self.state.set_size(width, height)
#######################################################
# Callbacks
#######################################################
def on_notebook_properties(self, action):
builder = WindowBuilder('notebook-properties')
builder.dialog.set_transient_for(self.window)
builder.dialog.set_title("%s - Properties" % self.notebook.info.name)
builder.name_entry.set_text(self.notebook.info.name)
builder.name_entry.set_sensitive(False)
builder.description_text_view.get_buffer().props.text = self.notebook.info.description
response = builder.dialog.run()
if response == gtk.RESPONSE_OK:
self.notebook.info.description = builder.description_text_view.get_buffer().props.text
builder.dialog.destroy()
def on_new_worksheet(self, action):
self.__new_worksheet()
def on_new_library(self, action):
self.__new_library()
def on_calculate_all(self, action):
for editor in self.editors:
if editor.needs_calculate:
editor.calculate()
def on_page_switched(self, notebook, _, page_num):
widget = self.nb_widget.get_nth_page(page_num)
for editor in self.editors:
if editor.widget == widget:
self.current_editor = editor
self.__update_title()
self._update_current_file()
self.update_sensitivity()
break
def on_page_reordered(self, notebook, page, new_page_num):
self._update_open_files()
def on_editor_notify_title(self, editor, *args):
self._update_editor_title(editor)
def on_editor_notify_filename(self, editor, *args):
self._update_open_files()
self._update_current_file()
def on_editor_notify_modified(self, editor, *args):
if editor == self.current_editor:
self.update_sensitivity()
def on_editor_notify_state(self, editor, *args):
self._update_editor_state(editor)
def on_configure_event(self, window, event):
self._update_size(event.width, event.height)
return False
#######################################################
# Public API
#######################################################
def confirm_discard(self):
if not self._confirm_discard(before_quit=True):
return False
return True
def open_file(self, file):
filename = os.path.join(self.notebook.folder, file.path)
for editor in self.editors:
if editor.file == file:
self._make_editor_current(editor)
return True
editor = self._load_editor(filename)
if not editor:
return False
self._add_editor(editor)
self._make_editor_current(editor)
self.__close_initial_editor()
return True
def add_initial_worksheet(self):
"""If there are no editors open, add a new blank worksheet"""
if len(self.editors) == 0:
self.__initial_editor = self.__new_worksheet()
self.__initial_editor.view.grab_focus()
def update_sensitivity(self):
BaseWindow.update_sensitivity(self)
some_need_calculate = False
for editor in self.editors:
if editor.needs_calculate:
some_need_calculate = True
self._set_action_sensitive('calculate-all', some_need_calculate)
|
jbaayen/reinteract
|
lib/reinteract/base_notebook_window.py
|
Python
|
bsd-2-clause
| 10,711 |