Datasets:
code stringlengths 84 247k | code_en stringlengths 84 247k | language stringclasses 1
value | file_path stringlengths 37 157 | license stringclasses 1
value | token_count int64 33 243k |
|---|---|---|---|---|---|
#! /usr/bin/env python3
import argparse
import collections
import csv
# The valid answer choices: the letters 'A' through 'E' inclusive.
choices = list(map(chr, range(ord('A'), ord('E')+1)))
def parse_dump(f):
    """Parse a raw capture dump into a ``{student_id: answer}`` mapping.

    Relevant lines look like ``Captured: <answer> (<id>, <id>, ...)``.
    Parentheses and commas are stripped, the second token is the answer,
    and the remaining tokens are joined to form the student id.  A later
    capture from the same student overwrites an earlier one.

    :param f: path of the raw dump file
    :return: dict mapping student id (str) to answer (str)
    """
    table = {}
    # One translation table instead of three chained str.replace passes.
    cleanup = str.maketrans('', '', '(),')
    with open(f, 'r') as stream:
        # Iterate the file object lazily instead of readlines(), so the
        # whole dump is never held in memory at once.
        for line in stream:
            if not line.startswith('Captured:'):
                continue
            parts = line.translate(cleanup).split()
            if len(parts) < 2:
                # Malformed capture line: skip it instead of raising
                # IndexError as the original code did.
                continue
            table[''.join(parts[2:])] = parts[1]
    return table
def tally(table):
    """Count how many students picked each valid answer choice.

    Answers outside ``choices`` (e.g. garbled captures) are ignored.

    :param table: mapping of student id -> answer letter
    :return: OrderedDict mapping each choice in ``choices`` to its count
    """
    # Initialize every choice to zero so the report always lists all of
    # them, even with no votes.
    count = collections.OrderedDict((choice, 0) for choice in choices)
    # Only the answers matter here, not the student ids (the original
    # iterated items() and discarded the key; it also had a stray ';').
    for answer in table.values():
        if answer in count:
            count[answer] += 1
    return count
def save(f, table):
    """Write ``table`` to ``f`` as two-column CSV rows (key, value).

    ``newline=''`` is required when handing a file to ``csv.writer``;
    without it the csv module's own '\\r\\n' terminators get doubled on
    platforms that translate newlines (blank lines between rows on
    Windows).

    :param f: output file path
    :param table: mapping written one (key, value) row per entry
    """
    with open(f, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in table.items():
            writer.writerow([key, value])
def main():
    """CLI entry point: parse a raw dump, print per-choice tallies, and
    optionally save the id->answer table as CSV."""
    parser = argparse.ArgumentParser(
        # Fixed typo in user-facing help text: "optionaly" -> "optionally".
        description='Tallies raw class dump data and optionally outputs a CSV matching IDs to answers')
    parser.add_argument(
        'dump', help='raw dump file')
    parser.add_argument('-t', '--table', help='saves table to file', nargs=1)
    args = parser.parse_args()
    table = parse_dump(args.dump)
    count = tally(table)
    print('Students: %i' % len(table))
    for choice in choices:
        print('%s\t%i' % (choice, count[choice]))
    # nargs=1 makes args.table a one-element list when provided.
    if args.table:
        save(args.table[0], table)
if __name__ == '__main__':
    main()
| #! /usr/bin/env python3
import argparse
import collections
import csv
choices = list(map(chr, range(ord('A'), ord('E')+1)))
def parse_dump(f):
    """Read a raw capture dump and map each student id to its answer.

    Lines of interest start with ``Captured:``; tuple punctuation is
    stripped, token 1 is the answer and the remaining tokens form the id.
    """
    answers = {}
    with open(f, 'r') as stream:
        for raw_line in stream.readlines():
            if not raw_line.startswith('Captured:'):
                continue
            tokens = raw_line.replace('(', '').replace(')', '').replace(',', '').split()
            answers[''.join(tokens[2:])] = tokens[1]
    return answers
def tally(table):
    """Return an ordered mapping of each answer choice to its vote count.

    Answers that are not one of the recognized choices are ignored.
    """
    count = collections.OrderedDict()
    for letter in choices:
        count[letter] = 0
    for _student, response in table.items():
        if response in count:
            count[response] += 1
    return count
def save(f, table):
    """Dump ``table`` to CSV at path ``f``, one (key, value) row each.

    Opens with ``newline=''`` as the csv module requires, so row
    terminators are not doubled on newline-translating platforms.
    """
    with open(f, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in table.items():
            writer.writerow([key, value])
def main():
    """CLI entry point: tally a raw capture dump and print per-choice
    counts; optionally persist the id->answer table as CSV."""
    parser = argparse.ArgumentParser(
        description='Tallies raw class dump data and optionaly outputs a CSV matching IDs to answers')
    parser.add_argument(
        'dump', help='raw dump file')
    parser.add_argument('-t', '--table' , help='saves table to file', nargs=1)
    args = parser.parse_args()
    table = parse_dump(args.dump)
    count = tally(table)
    print('Students: %i' % len(table))
    for choice in choices:
        print('%s\t%i' % (choice, count[choice]))
    # nargs=1 makes args.table a one-element list when the flag is given.
    if args.table:
        save(args.table[0], table)
if __name__ == '__main__':
    main()
| en | 000418673_wizard97-iSkipper_tally_7bb28e209a89.py | unknown | 463 |
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
# Hard ceiling on how many feed entries one request may return.
MAX_RECENT_FEEDS = 1000
# Page size used when the client omits `count` or sends a negative value.
DEFAULT_RECENT_FEEDS = 100
def get_feed_count(count: Optional[int]):
    """Clamp a client-supplied feed count to a safe value.

    Missing or negative counts fall back to DEFAULT_RECENT_FEEDS;
    anything above MAX_RECENT_FEEDS is capped at MAX_RECENT_FEEDS.
    """
    if count is None or count < 0:
        return DEFAULT_RECENT_FEEDS
    return min(count, MAX_RECENT_FEEDS)
class GlobalMeterReadingResource(Resource):
    """REST resource for meter readings aggregated across every site."""
    def __init__(self, meter_reading_dao: Any, feed_dao: Any):
        self.meter_reading_dao = meter_reading_dao
        self.feed_dao = feed_dao
    @use_args(MeterReadingsSchema)
    def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
        """Persist a batch of meter readings; replies 202 Accepted."""
        for entry in meter_readings['readings']:
            self.meter_reading_dao.add(entry)
        return "Accepted", 202
    @use_args({"count": fields.Int()}, location="query")
    def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
        """Return the most recent global meter readings (clamped count)."""
        safe_count = get_feed_count(args.get('count'))
        recent = self.feed_dao.get_recent_global(safe_count)
        return MeterReadingsSchema().dump({"readings": recent})
class SiteMeterReadingResource(DaoResource):
    """REST resource for meter readings scoped to a single site."""
    @use_args({"count": fields.Int()}, location="query")
    def get(self, args, site_id):
        """Return recent readings for *site_id*, count clamped to safe bounds."""
        safe_count = get_feed_count(args.get('count'))
        recent = self.dao.get_recent_for_site(site_id, safe_count)
        return MeterReadingsSchema().dump({"readings": recent})
| from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
# Hard ceiling on how many feed entries one request may return.
MAX_RECENT_FEEDS = 1000
# Page size used when the client omits `count` or sends a negative value.
DEFAULT_RECENT_FEEDS = 100
def get_feed_count(count: Optional[int]) -> int:
    """Decide a safe number of feeds to return.

    None or negative counts fall back to DEFAULT_RECENT_FEEDS; counts
    above MAX_RECENT_FEEDS are clamped to MAX_RECENT_FEEDS.
    """
    if count is None or count < 0:
        return DEFAULT_RECENT_FEEDS
    if count > MAX_RECENT_FEEDS:
        return MAX_RECENT_FEEDS
    return count
class GlobalMeterReadingResource(Resource):
    """A RESTful resource representing meter readings for all sites."""
    def __init__(self, meter_reading_dao: Any, feed_dao: Any):
        # DAO used to persist incoming readings (POST path).
        self.meter_reading_dao = meter_reading_dao
        # DAO used to fetch the recent global feed (GET path).
        self.feed_dao = feed_dao
    @use_args(MeterReadingsSchema)
    def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
        """Persist a validated batch of meter readings; returns 202 Accepted."""
        for reading in meter_readings['readings']:
            self.meter_reading_dao.add(reading)
        return "Accepted", 202
    @use_args({"count": fields.Int()}, location="query")
    def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
        """Return the most recent global readings; `count` is clamped
        via get_feed_count to avoid unbounded responses."""
        count = args.get('count')
        readings = self.feed_dao.get_recent_global(get_feed_count(count))
        return MeterReadingsSchema().dump({"readings": readings})
class SiteMeterReadingResource(DaoResource):
    """A RESTful resource representing meter readings for specific sites."""
    @use_args({"count": fields.Int()}, location="query")
    def get(self, args, site_id):
        """Return recent readings for *site_id*; `count` is clamped via
        get_feed_count to avoid unbounded responses."""
        count = args.get('count')
        readings = self.dao.get_recent_for_site(site_id, get_feed_count(count))
        return MeterReadingsSchema().dump({"readings": readings})
| en | 000182694_4heck-ru102py_meter_reading_3d42e5bb117a.py | unknown | 586 |
#!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils
import argparse
import logging
import matplotlib.cm as cm
import matplotlib.markers as markers
import matplotlib.pyplot as plt
import numpy as np
import os
import pylatex.utils
import pyndri
from sklearn.manifold import TSNE
import nvsm
# One distinct scatter marker per cluster: plain matplotlib marker strings
# extended with the tick/caret constants from matplotlib.markers so there
# are enough unique shapes (main() asserts cluster count < len(MARKERS)).
MARKERS = ['o', 's', '<', '>', '^', 'v', 'd', 'p', '*', '8',
           '1', '2', '3', '4',
           markers.TICKLEFT, markers.TICKRIGHT,
           markers.TICKUP, markers.TICKDOWN,
           markers.CARETLEFT, markers.CARETRIGHT,
           markers.CARETUP, markers.CARETDOWN]
# Default figure size in inches for all plots produced by this script.
plt.rcParams["figure.figsize"] = (8.0, 4.25)
def main():
    """Visualize cuNVSM object embeddings.

    Loads a trained model plus a pyndri index and, per classification
    file, either renders a 2-D t-SNE scatter plot colored by cluster or
    dumps vectors/metadata in TensorFlow embedding-projector TSV format.
    Returns -1 when logging cannot be configured.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('model')
    parser.add_argument('index', type=argparse_utils.existing_directory_path)
    parser.add_argument('--limit',
                        type=argparse_utils.positive_int,
                        default=None)
    parser.add_argument('--object_classification',
                        type=argparse_utils.existing_file_path,
                        nargs='+',
                        default=None)
    parser.add_argument('--filter_unclassified',
                        action='store_true',
                        default=False)
    parser.add_argument('--l2_normalize',
                        action='store_true',
                        default=False)
    parser.add_argument('--mode',
                        choices=('tsne', 'embedding_projector'),
                        default='tsne')
    parser.add_argument('--legend',
                        action='store_true',
                        default=False)
    parser.add_argument('--tick_labels',
                        action='store_true',
                        default=False)
    parser.add_argument('--edges',
                        action='store_true',
                        default=False)
    parser.add_argument('--border',
                        action='store_true',
                        default=False)
    parser.add_argument('--plot_out',
                        type=argparse_utils.nonexisting_file_path,
                        required=True)
    args = parser.parse_args()
    try:
        logging_utils.configure_logging(args)
    except IOError:
        return -1
    # Set matplotlib style.
    plt.style.use('bmh')
    logging.info('Loading index.')
    index = pyndri.Index(args.index)
    logging.info('Loading cuNVSM model.')
    # Model paths look like <base>_<epoch>.<ext>; recover base and epoch.
    model_base, epoch_and_ext = args.model.rsplit('_', 1)
    epoch = int(epoch_and_ext.split('.')[0])
    if not os.path.exists('{}_meta'.format(model_base)):
        # No meta file next to the model: strip a trailing batch index too.
        model_meta_base, batch_idx = model_base.rsplit('_', 1)
    else:
        model_meta_base = model_base
    model = nvsm.load_model(
        nvsm.load_meta(model_meta_base),
        model_base, epoch,
        only_object_embeddings=True)
    raw_object_representations = np.copy(model.object_representations)
    if args.limit:
        raw_object_representations = raw_object_representations[:args.limit, :]
    # NOTE(review): args.object_classification defaults to None, which makes
    # this loop raise TypeError unless the flag is passed — confirm intended.
    for object_classification in args.object_classification:
        # Derive a per-classification output path from --plot_out.
        root, ext = os.path.splitext(args.plot_out)
        plot_out = '{}-{}.{}'.format(
            root, os.path.basename(object_classification), ext.lstrip('.'))
        if object_classification and args.filter_unclassified:
            logging.info('Filtering unclassified.')
            with open(object_classification, 'r') as f_objects:
                # First whitespace-separated token per line is the object id.
                object_ids = [line.strip().split()[0] for line in f_objects]
            indices = sorted(model.inv_object_mapping[idx]
                             for _, idx in index.document_ids(object_ids)
                             if idx in model.inv_object_mapping)
            logging.info('Considering %d out of %d representations.',
                         len(indices), len(object_ids))
            # Maps original representation row -> row in the filtered matrix.
            translation_table = {idx: i for i, idx in enumerate(indices)}
            object_representations = raw_object_representations[indices]
            assert object_representations.shape[0] == \
                len(translation_table)
        else:
            # Only the filtered path is implemented; without
            # --filter_unclassified this aborts (and
            # object_representations would be unbound below).
            translation_table = None
            raise NotImplementedError()
        logging.info('Loading object clusters.')
        cluster_id_to_product_ids = {}
        if object_classification:
            # Each line: "<object_id> <cluster_id>".
            with open(object_classification, 'r') as f_objects:
                for line in f_objects:
                    object_id, cluster_id = line.strip().split()
                    if cluster_id not in cluster_id_to_product_ids:
                        cluster_id_to_product_ids[cluster_id] = set()
                    cluster_id_to_product_ids[cluster_id].add(object_id)
            # Re-key clusters from external ids to (possibly translated)
            # representation row indices, respecting --limit.
            for cluster_id in list(cluster_id_to_product_ids.keys()):
                object_ids = list(cluster_id_to_product_ids[cluster_id])
                cluster_id_to_product_ids[cluster_id] = set(
                    (model.inv_object_mapping[int_object_id]
                     if translation_table is None
                     else translation_table[
                         model.inv_object_mapping[int_object_id]])
                    for ext_object_id, int_object_id in
                    index.document_ids(object_ids)
                    if int_object_id in model.inv_object_mapping and
                    (args.limit is None or
                     (model.inv_object_mapping[int_object_id] <
                      args.limit)))
        else:
            raise NotImplementedError()
        # Need a distinct marker per cluster.
        assert len(cluster_id_to_product_ids) < len(MARKERS)
        if args.l2_normalize:
            logging.info('L2-normalizing representations.')
            object_representations /= np.linalg.norm(
                object_representations,
                axis=1, keepdims=True)
        if args.mode == 'tsne':
            logging.info('Running t-SNE.')
            twodim_object_representations = \
                TSNE(n_components=2, init='pca', random_state=0).\
                fit_transform(object_representations)
            logging.info('Plotting %s.', twodim_object_representations.shape)
            colors = cm.rainbow(
                np.linspace(0, 1, len(cluster_id_to_product_ids)))
            # Plot biggest clusters first so small ones draw on top.
            for idx, cluster_id in enumerate(
                    sorted(cluster_id_to_product_ids.keys(),
                           key=lambda cluster_id: len(
                               cluster_id_to_product_ids[cluster_id]),
                           reverse=True)):
                row_ids = list(cluster_id_to_product_ids[cluster_id])
                plt.scatter(
                    twodim_object_representations[row_ids, 0],
                    twodim_object_representations[row_ids, 1],
                    marker=MARKERS[idx],
                    edgecolors='grey' if args.edges else None,
                    cmap=plt.cm.Spectral,
                    color=colors[idx],
                    alpha=0.3,
                    label=pylatex.utils.escape_latex(cluster_id))
            plt.grid()
            plt.tight_layout()
            if args.legend:
                plt.legend(bbox_to_anchor=(0, -0.15, 1, 0),
                           loc=2,
                           ncol=2,
                           mode='expand',
                           borderaxespad=0)
            if not args.tick_labels:
                plt.gca().get_xaxis().set_visible(False)
                plt.gca().get_yaxis().set_visible(False)
            if not args.border:
                # plt.gcf().patch.set_visible(False)
                plt.gca().axis('off')
            logging.info('Writing %s.', plot_out)
            plt.savefig(plot_out,
                        bbox_inches='tight',
                        transparent=True,
                        pad_inches=0,
                        dpi=200)
        elif args.mode == 'embedding_projector':
            logging.info('Dumping to TensorFlow embedding projector format.')
            with open('{}_vectors.tsv'.format(plot_out), 'w') as f_vectors, \
                    open('{}_meta.tsv'.format(plot_out), 'w') as f_meta:
                f_meta.write('document_id\tclass\n')
                def write_rowids(row_ids, cluster_id):
                    # One TSV vector row plus one metadata row per object.
                    for row_id in row_ids:
                        f_vectors.write(
                            '{}\n'.format('\t'.join(
                                '{:.5f}'.format(x)
                                for x in object_representations[row_id])))
                        f_meta.write('{}\t{}\n'.format(
                            index.ext_document_id(
                                model.object_mapping[row_id]),
                            cluster_id))
                for cluster_id in cluster_id_to_product_ids.keys():
                    row_ids = list(cluster_id_to_product_ids[cluster_id])
                    write_rowids(row_ids, cluster_id)
    logging.info('All done!')
if __name__ == '__main__':
    sys.exit(main())
| #!/usr/bin/env python
import sys
from cvangysel import argparse_utils, logging_utils
import argparse
import logging
import matplotlib.cm as cm
import matplotlib.markers as markers
import matplotlib.pyplot as plt
import numpy as np
import os
import pylatex.utils
import pyndri
from sklearn.manifold import TSNE
import nvsm
MARKERS = ['o', 's', '<', '>', '^', 'v', 'd', 'p', '*', '8',
'1', '2', '3', '4',
markers.TICKLEFT, markers.TICKRIGHT,
markers.TICKUP, markers.TICKDOWN,
markers.CARETLEFT, markers.CARETRIGHT,
markers.CARETUP, markers.CARETDOWN]
plt.rcParams["figure.figsize"] = (8.0, 4.25)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model')
parser.add_argument('index', type=argparse_utils.existing_directory_path)
parser.add_argument('--limit',
type=argparse_utils.positive_int,
default=None)
parser.add_argument('--object_classification',
type=argparse_utils.existing_file_path,
nargs='+',
default=None)
parser.add_argument('--filter_unclassified',
action='store_true',
default=False)
parser.add_argument('--l2_normalize',
action='store_true',
default=False)
parser.add_argument('--mode',
choices=('tsne', 'embedding_projector'),
default='tsne')
parser.add_argument('--legend',
action='store_true',
default=False)
parser.add_argument('--tick_labels',
action='store_true',
default=False)
parser.add_argument('--edges',
action='store_true',
default=False)
parser.add_argument('--border',
action='store_true',
default=False)
parser.add_argument('--plot_out',
type=argparse_utils.nonexisting_file_path,
required=True)
args = parser.parse_args()
try:
logging_utils.configure_logging(args)
except IOError:
return -1
# Set matplotlib style.
plt.style.use('bmh')
logging.info('Loading index.')
index = pyndri.Index(args.index)
logging.info('Loading cuNVSM model.')
model_base, epoch_and_ext = args.model.rsplit('_', 1)
epoch = int(epoch_and_ext.split('.')[0])
if not os.path.exists('{}_meta'.format(model_base)):
model_meta_base, batch_idx = model_base.rsplit('_', 1)
else:
model_meta_base = model_base
model = nvsm.load_model(
nvsm.load_meta(model_meta_base),
model_base, epoch,
only_object_embeddings=True)
raw_object_representations = np.copy(model.object_representations)
if args.limit:
raw_object_representations = raw_object_representations[:args.limit, :]
for object_classification in args.object_classification:
root, ext = os.path.splitext(args.plot_out)
plot_out = '{}-{}.{}'.format(
root, os.path.basename(object_classification), ext.lstrip('.'))
if object_classification and args.filter_unclassified:
logging.info('Filtering unclassified.')
with open(object_classification, 'r') as f_objects:
object_ids = [line.strip().split()[0] for line in f_objects]
indices = sorted(model.inv_object_mapping[idx]
for _, idx in index.document_ids(object_ids)
if idx in model.inv_object_mapping)
logging.info('Considering %d out of %d representations.',
len(indices), len(object_ids))
translation_table = {idx: i for i, idx in enumerate(indices)}
object_representations = raw_object_representations[indices]
assert object_representations.shape[0] == \
len(translation_table)
else:
translation_table = None
raise NotImplementedError()
logging.info('Loading object clusters.')
cluster_id_to_product_ids = {}
if object_classification:
with open(object_classification, 'r') as f_objects:
for line in f_objects:
object_id, cluster_id = line.strip().split()
if cluster_id not in cluster_id_to_product_ids:
cluster_id_to_product_ids[cluster_id] = set()
cluster_id_to_product_ids[cluster_id].add(object_id)
for cluster_id in list(cluster_id_to_product_ids.keys()):
object_ids = list(cluster_id_to_product_ids[cluster_id])
cluster_id_to_product_ids[cluster_id] = set(
(model.inv_object_mapping[int_object_id]
if translation_table is None
else translation_table[
model.inv_object_mapping[int_object_id]])
for ext_object_id, int_object_id in
index.document_ids(object_ids)
if int_object_id in model.inv_object_mapping and
(args.limit is None or
(model.inv_object_mapping[int_object_id] <
args.limit)))
else:
raise NotImplementedError()
assert len(cluster_id_to_product_ids) < len(MARKERS)
if args.l2_normalize:
logging.info('L2-normalizing representations.')
object_representations /= np.linalg.norm(
object_representations,
axis=1, keepdims=True)
if args.mode == 'tsne':
logging.info('Running t-SNE.')
twodim_object_representations = \
TSNE(n_components=2, init='pca', random_state=0).\
fit_transform(object_representations)
logging.info('Plotting %s.', twodim_object_representations.shape)
colors = cm.rainbow(
np.linspace(0, 1, len(cluster_id_to_product_ids)))
for idx, cluster_id in enumerate(
sorted(cluster_id_to_product_ids.keys(),
key=lambda cluster_id: len(
cluster_id_to_product_ids[cluster_id]),
reverse=True)):
row_ids = list(cluster_id_to_product_ids[cluster_id])
plt.scatter(
twodim_object_representations[row_ids, 0],
twodim_object_representations[row_ids, 1],
marker=MARKERS[idx],
edgecolors='grey' if args.edges else None,
cmap=plt.cm.Spectral,
color=colors[idx],
alpha=0.3,
label=pylatex.utils.escape_latex(cluster_id))
plt.grid()
plt.tight_layout()
if args.legend:
plt.legend(bbox_to_anchor=(0, -0.15, 1, 0),
loc=2,
ncol=2,
mode='expand',
borderaxespad=0)
if not args.tick_labels:
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
if not args.border:
# plt.gcf().patch.set_visible(False)
plt.gca().axis('off')
logging.info('Writing %s.', plot_out)
plt.savefig(plot_out,
bbox_inches='tight',
transparent=True,
pad_inches=0,
dpi=200)
elif args.mode == 'embedding_projector':
logging.info('Dumping to TensorFlow embedding projector format.')
with open('{}_vectors.tsv'.format(plot_out), 'w') as f_vectors, \
open('{}_meta.tsv'.format(plot_out), 'w') as f_meta:
f_meta.write('document_id\tclass\n')
def write_rowids(row_ids, cluster_id):
for row_id in row_ids:
f_vectors.write(
'{}\n'.format('\t'.join(
'{:.5f}'.format(x)
for x in object_representations[row_id])))
f_meta.write('{}\t{}\n'.format(
index.ext_document_id(
model.object_mapping[row_id]),
cluster_id))
for cluster_id in cluster_id_to_product_ids.keys():
row_ids = list(cluster_id_to_product_ids[cluster_id])
write_rowids(row_ids, cluster_id)
logging.info('All done!')
if __name__ == '__main__':
sys.exit(main())
| en | 000069431_cvangysel-cuNVSM_visualize_a71df3d22bfa.py | unknown | 2,390 |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import torch.nn as nn
class ModelInterface(nn.Module):
    """Abstract base class that all models in this package implement."""
    @abstractmethod
    def set_dropout_ratio(self, ratio):
        """Sets the dropout ratio of the model."""
    @abstractmethod
    def get_input_res(self):
        """Returns the input resolution expected by the model."""
# Concrete implementations are imported after the interface definition to
# avoid circular imports (they subclass/implement ModelInterface).
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .resnet_angular import ResNetAngular
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
# Registry of face-recognition backbone architectures by CLI/config name.
models_backbones = {'rmnet': RMNetAngular, 'mobilenet': MobileFaceNet, 'resnet': ResNetAngular,
                    'shufflenetv2': ShuffleNetV2Angular, 'se_resnet': SEResNetAngular}
# Registry of landmark-regression architectures by name.
models_landmarks = {'landnet': LandmarksNet}
| """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
import torch.nn as nn
class ModelInterface(nn.Module):
    """Abstract base class that all models in this package implement."""
    @abstractmethod
    def set_dropout_ratio(self, ratio):
        """Sets the dropout ratio of the model."""
    @abstractmethod
    def get_input_res(self):
        """Returns the input resolution expected by the model."""
# Concrete implementations are imported after the interface definition to
# avoid circular imports (they subclass/implement ModelInterface).
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .resnet_angular import ResNetAngular
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
# Registry of face-recognition backbone architectures by CLI/config name.
models_backbones = {'rmnet': RMNetAngular, 'mobilenet': MobileFaceNet, 'resnet': ResNetAngular,
                    'shufflenetv2': ShuffleNetV2Angular, 'se_resnet': SEResNetAngular}
# Registry of landmark-regression architectures by name.
models_landmarks = {'landnet': LandmarksNet}
| en | 000600117_xzry6-openvino_training_extensions_common_a2ecae668f17.py | unknown | 371 |
# -*- coding: utf-8 -*-
# @File : sessionio.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import SessionIO_MSG_ZH, METERPRETER_PROMPT, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, \
SessionIO_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class SessionIO(object):
    """Interactive I/O bridge between the web UI and a Meterpreter session.

    Every method returns a `data_return(...)` context dict; console output
    is accumulated per host IP in the Xcache sessionio cache.
    """
    @staticmethod
    def create(ipaddress=None, sessionid=None, user_input=None):
        """Send one line of user input to the Meterpreter session.

        Special handling: a bare 'shell' is rejected (interactive shell
        switch unsupported), 'shell <cmd>' is rewritten to "shell -c
        '<cmd>'", and 'exit' kills the session instead of being written.
        """
        try:
            user_input = user_input.strip()
            if user_input.startswith('shell'):
                command = user_input[len('shell'):].strip()
                if len(command) == 0:
                    # Bare 'shell': append an explanatory message to the
                    # console cache instead of sending anything.
                    new_bufer = "\nNot support switch to Dos/Bash,input like\'shell whoami\' to run os cmd.\n"
                    result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
                    context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
                    return context
                else:
                    # Rewrite to a one-shot command execution.
                    user_input = f"shell -c '{command}'"
            if user_input.startswith('exit'):
                # 'exit' terminates the session via RPC rather than being
                # written to the session's stdin.
                params = [sessionid]
                result = RpcClient.call(Method.SessionMeterpreterSessionKill, params,
                                        timeout=RPC_SESSION_OPER_SHORT_REQ)
                context = data_return(203, result, SessionIO_MSG_ZH.get(203), SessionIO_MSG_EN.get(203))
                return context
            params = [sessionid, user_input]
            result = RpcClient.call(Method.SessionMeterpreterWrite, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None:
                context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
            elif result.get('result') == 'success':
                # Echo the accepted input (with prompt) into the console cache.
                new_bufer = f"{METERPRETER_PROMPT}{user_input}\n"
                result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
                context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
            else:
                context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
        except Exception as E:
            logger.error(E)
            context = data_return(306, {}, SessionIO_MSG_ZH.get(306), SessionIO_MSG_EN.get(306))
        return context
    @staticmethod
    def update(ipaddress=None, sessionid=None):
        """Poll the session for new output and append it to the cache.

        Without a valid sessionid, returns the cached history only.
        """
        old_result = Xcache.get_sessionio_cache(ipaddress)
        if sessionid is None or sessionid == -1:
            context = data_return(202, old_result, SessionIO_MSG_ZH.get(202), SessionIO_MSG_EN.get(202))
            return context
        try:
            params = [sessionid]
            result = RpcClient.call(Method.SessionMeterpreterRead, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
            if result is None or (isinstance(result, dict) is not True):
                context = data_return(303, old_result, SessionIO_MSG_ZH.get(303), SessionIO_MSG_EN.get(303))
                return context
            new_bufer = result.get('data')
            result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
            context = data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200))  # special case: generic CODE_MSG used here
        except Exception as E:
            logger.error(E)
            # NOTE(review): status code 306 is paired with message key 405
            # here — looks like a code/message mismatch; confirm intended.
            context = data_return(306, old_result, SessionIO_MSG_ZH.get(405), SessionIO_MSG_EN.get(405))
        return context
    @staticmethod
    def destroy(ipaddress=None):
        """Clear the cached session I/O history for this host."""
        result = Xcache.del_sessionio_cache(ipaddress)
        context = data_return(204, result, SessionIO_MSG_ZH.get(204), SessionIO_MSG_EN.get(204))
        return context
| # -*- coding: utf-8 -*-
# @File : sessionio.py
# @Date : 2021/2/25
# @Desc :
from Lib.api import data_return
from Lib.configs import SessionIO_MSG_ZH, METERPRETER_PROMPT, CODE_MSG_ZH, RPC_SESSION_OPER_SHORT_REQ, CODE_MSG_EN, \
SessionIO_MSG_EN
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class SessionIO(object):
@staticmethod
def create(ipaddress=None, sessionid=None, user_input=None):
try:
user_input = user_input.strip()
if user_input.startswith('shell'):
command = user_input[len('shell'):].strip()
if len(command) == 0:
new_bufer = "\nNot support switch to Dos/Bash,input like\'shell whoami\' to run os cmd.\n"
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
return context
else:
user_input = f"shell -c '{command}'"
if user_input.startswith('exit'):
params = [sessionid]
result = RpcClient.call(Method.SessionMeterpreterSessionKill, params,
timeout=RPC_SESSION_OPER_SHORT_REQ)
context = data_return(203, result, SessionIO_MSG_ZH.get(203), SessionIO_MSG_EN.get(203))
return context
params = [sessionid, user_input]
result = RpcClient.call(Method.SessionMeterpreterWrite, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
if result is None:
context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
elif result.get('result') == 'success':
new_bufer = f"{METERPRETER_PROMPT}{user_input}\n"
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, SessionIO_MSG_ZH.get(200), SessionIO_MSG_EN.get(200))
else:
context = data_return(305, {}, SessionIO_MSG_ZH.get(305), SessionIO_MSG_EN.get(305))
except Exception as E:
logger.error(E)
context = data_return(306, {}, SessionIO_MSG_ZH.get(306), SessionIO_MSG_EN.get(306))
return context
@staticmethod
def update(ipaddress=None, sessionid=None):
old_result = Xcache.get_sessionio_cache(ipaddress)
if sessionid is None or sessionid == -1:
context = data_return(202, old_result, SessionIO_MSG_ZH.get(202), SessionIO_MSG_EN.get(202))
return context
try:
params = [sessionid]
result = RpcClient.call(Method.SessionMeterpreterRead, params, timeout=RPC_SESSION_OPER_SHORT_REQ)
if result is None or (isinstance(result, dict) is not True):
context = data_return(303, old_result, SessionIO_MSG_ZH.get(303), SessionIO_MSG_EN.get(303))
return context
new_bufer = result.get('data')
result = Xcache.add_sessionio_cache(ipaddress, new_bufer)
context = data_return(200, result, CODE_MSG_ZH.get(200), CODE_MSG_EN.get(200)) # code特殊处理
except Exception as E:
logger.error(E)
context = data_return(306, old_result, SessionIO_MSG_ZH.get(405), SessionIO_MSG_EN.get(405))
return context
@staticmethod
def destroy(ipaddress=None):
"""清空历史记录"""
result = Xcache.del_sessionio_cache(ipaddress)
context = data_return(204, result, SessionIO_MSG_ZH.get(204), SessionIO_MSG_EN.get(204))
return context
| en | 000707334_evi1hack-viperpython_sessionio_5ee00cdde83b.py | unknown | 1,178 |
import ctypes
import gc
import logging
import time
from collections import deque
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import angr
import archinfo
from angr import Block, Project, SimState
from angr.engines.successors import SimSuccessors
from cle.backends.elf.metaelf import MetaELF
from capstone import x86_const
from ..errors import HaseError
from ..loader import Loader
from ..progress_log import ProgressLog
from ..pt import Instruction, InstructionClass
from ..pwn_wrapper import ELF, Coredump, Mapping
from .cdanalyzer import CoredumpAnalyzer
from .filter import FilterTrace
from .hook import setup_project_hook
from .start_state import create_start_state
from .state import State, StateManager
l = logging.getLogger(__name__)
def constrain_registers(state: State, coredump: Coredump) -> bool:
    """Copy the coredump's register values into the symbolic *state*.

    Only done when the state's rip already matches the coredump's rip
    (i.e. replay reached the crash point); returns True on success and
    False on an rip mismatch, logging both rip/rsp pairs for debugging.
    """
    # FIXME: if exception caught is omitted by hook?
    # If same address, then give registers
    if state.registers["rip"].value == coredump.registers["rip"]:
        # don't give rbp, rsp
        assert state.registers["rsp"].value == coredump.registers["rsp"]
        # General-purpose/segment/flag registers copied verbatim; rbp and
        # rsp are intentionally excluded (rsp is asserted equal above).
        registers = [
            "gs",
            "rip",
            "rdx",
            "r15",
            "rax",
            "rsi",
            "rcx",
            "r14",
            "fs",
            "r12",
            "r13",
            "r10",
            "r11",
            "rbx",
            "r8",
            "r9",
            "eflags",
            "rdi",
        ]
        for name in registers:
            state.registers[name] = coredump.registers[name]
        return True
    else:
        l.warning("RIP mismatch.")
        arip = state.simstate.regs.rip
        crip = hex(coredump.registers["rip"])
        arsp = state.simstate.regs.rsp
        crsp = hex(coredump.registers["rsp"])
        l.warning("{} {} {} {}".format(arip, crip, arsp, crsp))
        return False
def repair_syscall_jump(state_block: Any, step: SimSuccessors) -> SimState:
    """Step past angr's syscall stub when a block begins with `syscall`.

    If the block's first instruction is a syscall and the successor's rip
    landed in 0x3000000..0x3002000 (presumably angr's internal syscall
    trampoline region — confirm against angr's SimOS mapping), execute one
    more instruction so execution continues past the stub; otherwise the
    original successors are returned unchanged.
    """
    capstone = state_block.capstone
    first_ins = capstone.insns[0].insn
    ins_repr = first_ins.mnemonic
    # manually syscall will have no entry and just execute it.
    if (
        ins_repr.startswith("syscall")
        and 0x3000000 <= step.successors[0].reg_concrete("rip") < 0x3002000
    ):
        return step.successors[0].step(num_inst=1)
    return step
class Tracer:
    def __init__(
        self,
        executable: str,
        trace: List[Instruction],
        coredump: Coredump,
        loader: Loader,
        name: str = "(unamed)",
    ) -> None:
        """Prepare symbolic replay of *trace* against *executable*.

        Loads the angr project, trims the trace to start at _start/main,
        installs hooks, filters the trace, and creates the start state
        with breakpoints that keep execution pinned to the trace.
        """
        self.name = name
        self.executable = executable
        # we keep this for debugging in ipdb
        self.loader = loader
        self.project = loader.angr_project()
        assert self.project.loader.main_object.os.startswith("UNIX")
        self.coredump = coredump
        self.debug_unsat = None  # type: Optional[SimState]
        # Current trace instruction, consumed by the breakpoint callbacks.
        self.instruction = None  # type: Optional[Instruction]
        self.trace = trace
        elf = ELF(executable)
        start = elf.symbols.get("_start")
        main = elf.symbols.get("main")
        # Map each shared library to the absolute address of its .text.
        lib_text_addrs = {}  # type: Dict[str, int]
        lib_opts = self.loader.load_options()["lib_opts"]
        for lib in lib_opts:
            lib_text_addrs[lib] = lib_opts[lib]['base_addr'] + MetaELF.get_text_offset(lib)
        self.cdanalyzer = CoredumpAnalyzer(
            elf, self.coredump, lib_text_addrs
        )
        # Drop the trace prefix before _start/main.
        # NOTE(review): no `break` — if both symbols appear, the slice from
        # the last match wins; confirm this is intended.
        for (idx, event) in enumerate(self.trace):
            if event.ip == start or event.ip == main:
                self.trace = trace[idx:]
        self.use_hook = True
        hooked_symbols, omitted_section = setup_project_hook(
            self.project, self.cdanalyzer.gdb
        )
        self.filter = FilterTrace(
            self.project,
            self.trace,
            hooked_symbols,
            self.cdanalyzer.gdb,
            omitted_section,
            elf.statically_linked,
            name,
        )
        # Keep the unfiltered trace around for debugging/comparison.
        self.old_trace = self.trace
        self.trace, self.trace_idx, self.hook_target = self.filter.filtered_trace()
        l.info(
            "Trace length: {} | OldTrace length: {}".format(
                len(self.trace), len(self.old_trace)
            )
        )
        self.hook_plt_idx = list(self.hook_target.keys())
        self.hook_plt_idx.sort()
        self.filter.entry_check()
        self.start_state = create_start_state(self.project, self.trace, self.cdanalyzer)
        # Breakpoints that concretize call targets / successor ips to the
        # recorded trace (see concretize_indirect_calls / concretize_ip).
        self.start_state.inspect.b(
            "call", when=angr.BP_BEFORE, action=self.concretize_indirect_calls
        )
        self.start_state.inspect.b(
            "successor", when=angr.BP_AFTER, action=self.concretize_ip
        )
    def concretize_indirect_calls(self, state: SimState) -> None:
        """Breakpoint action (BP_BEFORE on 'call'): force the call target
        to the current instruction from the recorded trace."""
        assert self.instruction is not None
        if not state.ip.symbolic:
            ip = state.solver.eval(state.ip)
            # A concrete target must be a PLT/vDSO stub or match the trace.
            assert self.filter.test_plt_vdso(ip) or ip == self.instruction.ip
            state.inspect.function_address = self.instruction.ip
    def concretize_ip(self, state: SimState) -> None:
        """Breakpoint action (BP_AFTER on 'successor'): pin a symbolic
        successor target to the traced instruction's address."""
        assert self.instruction is not None
        ip = self.instruction.ip
        if state.scratch.target.symbolic:
            state.ip = ip
            state.add_constraints(state.scratch.target == ip, action=True)
            # avoid evaluation of symbolic target
            state.scratch.target = ip
def desc_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.project.loader.describe_addr(inst.ip),
)
def desc_old_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.old_trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start, hex(inst.ip), self.project.loader.describe_addr(inst.ip)
)
def desc_addr(self, addr: int) -> str:
return self.project.loader.describe_addr(addr)
def desc_stack_inst(
self, start: int, end: Optional[int] = None, show_extra: bool = True
) -> None:
for i, inst in enumerate(self.trace[start:end]):
blk = self.project.factory.block(inst.ip)
first_ins = blk.capstone.insns[0]
if (
first_ins.mnemonic == "push"
or first_ins.mnemonic == "pop"
or first_ins.mnemonic == "enter"
or first_ins.mnemonic == "leave"
# or first_ins.mnemonic == 'call'
# or first_ins.mnemonic == 'retn'
or (
len(first_ins.operands) > 0
and first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
)
):
if show_extra:
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.desc_addr(inst.ip),
str(first_ins),
)
else:
print(str(first_ins))
def desc_callstack(self, state: Optional[SimState] = None) -> None:
state = self.debug_state[-1] if state is None else state
callstack = state.callstack
for i, c in enumerate(callstack):
print(
"Frame {}: {} => {}, sp = {}".format(
i,
self.desc_addr(c.call_site_addr),
self.desc_addr(c.func_addr),
hex(c.stack_ptr),
)
)
def repair_exit_handler(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "exit"
):
if len(state.libc.exit_handler):
addr = state.libc.exit_handler[0]
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
return step
def repair_alloca_ins(self, state: SimState, state_block: Block) -> None:
# NOTE: alloca problem, focus on sub rsp, rax
# Typical usage: alloca(strlen(x))
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
if first_ins.mnemonic == "sub":
if (
first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
and first_ins.operands[1].type == 1
):
reg_name = first_ins.reg_name(first_ins.operands[1].reg)
reg_v = getattr(state.regs, reg_name)
if state.solver.symbolic(reg_v):
setattr(state.regs, reg_name, state.libc.max_str_len)
def repair_jump_ins(
self,
state: SimState,
state_block: Any,
previous_instruction: Instruction,
instruction: Instruction,
) -> Tuple[bool, str]:
# NOTE: typical case: switch(getchar())
if previous_instruction.iclass == InstructionClass.ptic_other:
return False, ""
jump_ins = ["jmp", "call"] # currently not deal with jcc regs
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("ret"):
if not state.solver.symbolic(state.regs.rsp):
mem = state.memory.load(state.regs.rsp, 8)
jump_target = 0
if not state.solver.symbolic(mem):
jump_target = state.solver.eval(mem)
if jump_target != instruction.ip:
return True, "ret"
else:
return True, "ok"
else:
return True, "ret"
for ins in jump_ins:
if ins_repr.startswith(ins):
# call rax
if first_ins.operands[0].type == 1:
reg_name = first_ins.op_str
reg_v = getattr(state.regs, reg_name)
if (
state.solver.symbolic(reg_v)
or state.solver.eval(reg_v) != instruction.ip
):
setattr(state.regs, reg_name, instruction.ip)
return True, ins
# jmp 0xaabb
if first_ins.operands[0].type == 2:
return True, ins
# jmp [base + index*scale + disp]
if first_ins.operands[0].type == 3:
self.last_jump_table = state
mem = first_ins.operands[0].value.mem
target = mem.disp
if mem.index:
reg_index_name = first_ins.reg_name(mem.index)
reg_index = getattr(state.regs, reg_index_name)
if state.solver.symbolic(reg_index):
return True, ins
else:
target += state.solver.eval(reg_index) * mem.scale
if mem.base:
reg_base_name = first_ins.reg_name(mem.base)
reg_base = getattr(state.regs, reg_base_name)
if state.solver.symbolic(reg_base):
return True, ins
else:
target += state.solver.eval(reg_base)
ip_mem = state.memory.load(target, 8, endness="Iend_LE")
if not state.solver.symbolic(ip_mem):
jump_target = state.solver.eval(ip_mem)
if jump_target != instruction.ip:
return True, ins
else:
return True, "ok"
else:
return True, ins
return False, "ok"
def repair_ip(self, state: SimState) -> int:
try:
addr = state.solver.eval(state._ip)
# NOTE: repair IFuncResolver
if (
self.project.loader.find_object_containing(addr)
== self.project.loader.extern_object
):
func = self.project._sim_procedures.get(addr, None)
if func:
funcname = func.kwargs["funcname"]
libf = self.project.loader.find_symbol(funcname)
if libf:
addr = libf.rebased_addr
except Exception:
logging.exception("Error while repairing ip for {}".format(self.name))
# NOTE: currently just try to repair ip for syscall
addr = self.debug_state[-2].addr
return addr
def repair_func_resolver(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "IFuncResolver"
):
func = self.filter.find_function(self.debug_state[-2].addr)
if func:
addr = self.project.loader.find_symbol(func.name).rebased_addr
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
else:
raise HaseError("Cannot resolve function")
return step
def last_match(self, choice: SimState, instruction: Instruction) -> bool:
# if last trace is A -> A
if (
instruction == self.trace[-1]
and len(self.trace) > 2
and self.trace[-1].ip == self.trace[-2].ip
):
if choice.addr == instruction.ip:
return True
return False
def jump_match(
self,
old_state: SimState,
choice: SimState,
previous_instruction: Instruction,
instruction: Instruction,
) -> bool:
if choice.addr == instruction.ip:
l.debug("jump 0%x -> 0%x", previous_instruction.ip, choice.addr)
return True
return False
def repair_satness(self, old_state: SimState, new_state: SimState) -> None:
if not new_state.solver.satisfiable():
new_state.solver._stored_solver = old_state.solver._solver.branch()
if not self.debug_unsat:
self.debug_sat = old_state
self.debug_unsat = new_state
def repair_ip_at_syscall(self, old_block: Block, new_state: SimState) -> None:
capstone = old_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("syscall"):
new_state.regs.ip_at_syscall = new_state.ip
def post_execute(
self, old_state: SimState, old_block: Block, state: SimState
) -> None:
self.repair_satness(old_state, state)
self.repair_ip_at_syscall(old_block, state)
def execute(
self,
state: SimState,
previous_instruction: Instruction,
instruction: Instruction,
index: int,
) -> Tuple[SimState, SimState]:
self.debug_state.append(state)
state_block = state.block() # type: Block
force_jump, force_type = self.repair_jump_ins(
state, state_block, previous_instruction, instruction
)
self.repair_alloca_ins(state, state_block)
try:
step = self.project.factory.successors(
state, num_inst=1 # , force_addr=addr
)
step = repair_syscall_jump(state_block, step)
step = self.repair_func_resolver(state, step)
step = self.repair_exit_handler(state, step)
except Exception:
logging.exception("Error while finding successor for {}".format(self.name))
new_state = state.copy()
new_state.regs.ip = instruction.ip
self.post_execute(state, state_block, new_state)
return state, new_state
if force_jump:
new_state = state.copy()
if force_type == "call":
if not self.project.is_hooked(instruction.ip):
new_state.regs.rsp -= 8
ret_addr = state.addr + state_block.capstone.insns[0].size
new_state.memory.store(
new_state.regs.rsp, ret_addr, endness="Iend_LE"
)
elif force_type == "ret":
new_state.regs.rsp += 8
new_state.regs.ip = instruction.ip
choices = [new_state]
else:
choices = step.successors + step.unsat_successors
old_state = state
l.info(repr(state) + " " + repr(previous_instruction) + " " + repr(instruction))
for choice in choices:
# HACKS: if ip is symbolic
try:
if self.last_match(choice, instruction):
return choice, choice
if self.jump_match(
old_state, choice, previous_instruction, instruction
):
self.post_execute(old_state, state_block, choice)
return old_state, choice
except angr.SimValueError:
logging.exception("Error while jumping in {}".format(self.name))
pass
new_state = state.copy()
new_state.regs.ip = instruction.ip
return state, new_state
def valid_address(self, address: int) -> bool:
return self.project.loader.find_object_containing(address)
def run(self) -> StateManager:
simstate = self.start_state
states = StateManager(self, len(self.trace) + 1)
states.add_major(State(0, None, self.trace[0], None, simstate))
self.debug_unsat = None # type: Optional[SimState]
self.debug_state = deque(maxlen=50) # type: deque
self.skip_addr = {} # type: Dict[int, int]
cnt = -1
interval = max(1, len(self.trace) // 200)
length = len(self.trace) - 1
l.info("start processing trace")
progress_log = ProgressLog(
name="process trace of {}".format(self.name),
total_steps=len(self.trace),
log_frequency=int(1e3),
kill_limit=60 * 60 * 24,
)
# prev_instr.ip == state.ip
for previous_idx in range(len(self.trace) - 1):
previous_instruction = self.trace[previous_idx]
if previous_idx + 1 >= len(self.trace):
self.instruction = self.trace[previous_idx]
else:
self.instruction = self.trace[previous_idx + 1]
cnt += 1
progress_log.update(cnt)
if not cnt % 500:
gc.collect()
assert self.valid_address(self.instruction.ip)
old_simstate, new_simstate = self.execute(
simstate, previous_instruction, self.instruction, cnt
)
simstate = new_simstate
if cnt % interval == 0 or length - cnt < 15:
states.add_major(
State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
)
if (
self.project.loader.find_object_containing(self.instruction.ip)
== self.project.loader.main_object
):
states.last_main_state = State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
constrain_registers(states.major_states[-1], self.coredump)
return states
| import ctypes
import gc
import logging
import time
from collections import deque
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import angr
import archinfo
from angr import Block, Project, SimState
from angr.engines.successors import SimSuccessors
from cle.backends.elf.metaelf import MetaELF
from capstone import x86_const
from ..errors import HaseError
from ..loader import Loader
from ..progress_log import ProgressLog
from ..pt import Instruction, InstructionClass
from ..pwn_wrapper import ELF, Coredump, Mapping
from .cdanalyzer import CoredumpAnalyzer
from .filter import FilterTrace
from .hook import setup_project_hook
from .start_state import create_start_state
from .state import State, StateManager
l = logging.getLogger(__name__)
def constrain_registers(state: State, coredump: Coredump) -> bool:
# FIXME: if exception caught is omitted by hook?
# If same address, then give registers
if state.registers["rip"].value == coredump.registers["rip"]:
# don't give rbp, rsp
assert state.registers["rsp"].value == coredump.registers["rsp"]
registers = [
"gs",
"rip",
"rdx",
"r15",
"rax",
"rsi",
"rcx",
"r14",
"fs",
"r12",
"r13",
"r10",
"r11",
"rbx",
"r8",
"r9",
"eflags",
"rdi",
]
for name in registers:
state.registers[name] = coredump.registers[name]
return True
else:
l.warning("RIP mismatch.")
arip = state.simstate.regs.rip
crip = hex(coredump.registers["rip"])
arsp = state.simstate.regs.rsp
crsp = hex(coredump.registers["rsp"])
l.warning("{} {} {} {}".format(arip, crip, arsp, crsp))
return False
def repair_syscall_jump(state_block: Any, step: SimSuccessors) -> SimState:
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
# manually syscall will have no entry and just execute it.
if (
ins_repr.startswith("syscall")
and 0x3000000 <= step.successors[0].reg_concrete("rip") < 0x3002000
):
return step.successors[0].step(num_inst=1)
return step
class Tracer:
def __init__(
self,
executable: str,
trace: List[Instruction],
coredump: Coredump,
loader: Loader,
name: str = "(unamed)",
) -> None:
self.name = name
self.executable = executable
# we keep this for debugging in ipdb
self.loader = loader
self.project = loader.angr_project()
assert self.project.loader.main_object.os.startswith("UNIX")
self.coredump = coredump
self.debug_unsat = None # type: Optional[SimState]
self.instruction = None # type: Optional[Instruction]
self.trace = trace
elf = ELF(executable)
start = elf.symbols.get("_start")
main = elf.symbols.get("main")
lib_text_addrs = {} # type: Dict[str, int]
lib_opts = self.loader.load_options()["lib_opts"]
for lib in lib_opts:
lib_text_addrs[lib] = lib_opts[lib]['base_addr'] + MetaELF.get_text_offset(lib)
self.cdanalyzer = CoredumpAnalyzer(
elf, self.coredump, lib_text_addrs
)
for (idx, event) in enumerate(self.trace):
if event.ip == start or event.ip == main:
self.trace = trace[idx:]
self.use_hook = True
hooked_symbols, omitted_section = setup_project_hook(
self.project, self.cdanalyzer.gdb
)
self.filter = FilterTrace(
self.project,
self.trace,
hooked_symbols,
self.cdanalyzer.gdb,
omitted_section,
elf.statically_linked,
name,
)
self.old_trace = self.trace
self.trace, self.trace_idx, self.hook_target = self.filter.filtered_trace()
l.info(
"Trace length: {} | OldTrace length: {}".format(
len(self.trace), len(self.old_trace)
)
)
self.hook_plt_idx = list(self.hook_target.keys())
self.hook_plt_idx.sort()
self.filter.entry_check()
self.start_state = create_start_state(self.project, self.trace, self.cdanalyzer)
self.start_state.inspect.b(
"call", when=angr.BP_BEFORE, action=self.concretize_indirect_calls
)
self.start_state.inspect.b(
"successor", when=angr.BP_AFTER, action=self.concretize_ip
)
def concretize_indirect_calls(self, state: SimState) -> None:
assert self.instruction is not None
if not state.ip.symbolic:
ip = state.solver.eval(state.ip)
assert self.filter.test_plt_vdso(ip) or ip == self.instruction.ip
state.inspect.function_address = self.instruction.ip
def concretize_ip(self, state: SimState) -> None:
assert self.instruction is not None
ip = self.instruction.ip
if state.scratch.target.symbolic:
state.ip = ip
state.add_constraints(state.scratch.target == ip, action=True)
# avoid evaluation of symbolic target
state.scratch.target = ip
def desc_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.project.loader.describe_addr(inst.ip),
)
def desc_old_trace(
self,
start: int,
end: Optional[int] = None,
filt: Optional[Callable[[int], bool]] = None,
) -> None:
for i, inst in enumerate(self.old_trace[start:end]):
if not filt or filt(inst.ip):
print(
i + start, hex(inst.ip), self.project.loader.describe_addr(inst.ip)
)
def desc_addr(self, addr: int) -> str:
return self.project.loader.describe_addr(addr)
def desc_stack_inst(
self, start: int, end: Optional[int] = None, show_extra: bool = True
) -> None:
for i, inst in enumerate(self.trace[start:end]):
blk = self.project.factory.block(inst.ip)
first_ins = blk.capstone.insns[0]
if (
first_ins.mnemonic == "push"
or first_ins.mnemonic == "pop"
or first_ins.mnemonic == "enter"
or first_ins.mnemonic == "leave"
# or first_ins.mnemonic == 'call'
# or first_ins.mnemonic == 'retn'
or (
len(first_ins.operands) > 0
and first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
)
):
if show_extra:
print(
i + start,
self.trace_idx[i + start],
hex(inst.ip),
self.desc_addr(inst.ip),
str(first_ins),
)
else:
print(str(first_ins))
def desc_callstack(self, state: Optional[SimState] = None) -> None:
state = self.debug_state[-1] if state is None else state
callstack = state.callstack
for i, c in enumerate(callstack):
print(
"Frame {}: {} => {}, sp = {}".format(
i,
self.desc_addr(c.call_site_addr),
self.desc_addr(c.func_addr),
hex(c.stack_ptr),
)
)
def repair_exit_handler(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "exit"
):
if len(state.libc.exit_handler):
addr = state.libc.exit_handler[0]
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
return step
def repair_alloca_ins(self, state: SimState, state_block: Block) -> None:
# NOTE: alloca problem, focus on sub rsp, rax
# Typical usage: alloca(strlen(x))
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
if first_ins.mnemonic == "sub":
if (
first_ins.operands[0].reg
in (x86_const.X86_REG_RSP, x86_const.X86_REG_RBP)
and first_ins.operands[1].type == 1
):
reg_name = first_ins.reg_name(first_ins.operands[1].reg)
reg_v = getattr(state.regs, reg_name)
if state.solver.symbolic(reg_v):
setattr(state.regs, reg_name, state.libc.max_str_len)
def repair_jump_ins(
self,
state: SimState,
state_block: Any,
previous_instruction: Instruction,
instruction: Instruction,
) -> Tuple[bool, str]:
# NOTE: typical case: switch(getchar())
if previous_instruction.iclass == InstructionClass.ptic_other:
return False, ""
jump_ins = ["jmp", "call"] # currently not deal with jcc regs
capstone = state_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("ret"):
if not state.solver.symbolic(state.regs.rsp):
mem = state.memory.load(state.regs.rsp, 8)
jump_target = 0
if not state.solver.symbolic(mem):
jump_target = state.solver.eval(mem)
if jump_target != instruction.ip:
return True, "ret"
else:
return True, "ok"
else:
return True, "ret"
for ins in jump_ins:
if ins_repr.startswith(ins):
# call rax
if first_ins.operands[0].type == 1:
reg_name = first_ins.op_str
reg_v = getattr(state.regs, reg_name)
if (
state.solver.symbolic(reg_v)
or state.solver.eval(reg_v) != instruction.ip
):
setattr(state.regs, reg_name, instruction.ip)
return True, ins
# jmp 0xaabb
if first_ins.operands[0].type == 2:
return True, ins
# jmp [base + index*scale + disp]
if first_ins.operands[0].type == 3:
self.last_jump_table = state
mem = first_ins.operands[0].value.mem
target = mem.disp
if mem.index:
reg_index_name = first_ins.reg_name(mem.index)
reg_index = getattr(state.regs, reg_index_name)
if state.solver.symbolic(reg_index):
return True, ins
else:
target += state.solver.eval(reg_index) * mem.scale
if mem.base:
reg_base_name = first_ins.reg_name(mem.base)
reg_base = getattr(state.regs, reg_base_name)
if state.solver.symbolic(reg_base):
return True, ins
else:
target += state.solver.eval(reg_base)
ip_mem = state.memory.load(target, 8, endness="Iend_LE")
if not state.solver.symbolic(ip_mem):
jump_target = state.solver.eval(ip_mem)
if jump_target != instruction.ip:
return True, ins
else:
return True, "ok"
else:
return True, ins
return False, "ok"
def repair_ip(self, state: SimState) -> int:
try:
addr = state.solver.eval(state._ip)
# NOTE: repair IFuncResolver
if (
self.project.loader.find_object_containing(addr)
== self.project.loader.extern_object
):
func = self.project._sim_procedures.get(addr, None)
if func:
funcname = func.kwargs["funcname"]
libf = self.project.loader.find_symbol(funcname)
if libf:
addr = libf.rebased_addr
except Exception:
logging.exception("Error while repairing ip for {}".format(self.name))
# NOTE: currently just try to repair ip for syscall
addr = self.debug_state[-2].addr
return addr
def repair_func_resolver(self, state: SimState, step: SimSuccessors) -> SimState:
artifacts = getattr(step, "artifacts", None)
if (
artifacts
and "procedure" in artifacts.keys()
and artifacts["name"] == "IFuncResolver"
):
func = self.filter.find_function(self.debug_state[-2].addr)
if func:
addr = self.project.loader.find_symbol(func.name).rebased_addr
step = self.project.factory.successors(
state, num_inst=1, force_addr=addr
)
else:
raise HaseError("Cannot resolve function")
return step
def last_match(self, choice: SimState, instruction: Instruction) -> bool:
# if last trace is A -> A
if (
instruction == self.trace[-1]
and len(self.trace) > 2
and self.trace[-1].ip == self.trace[-2].ip
):
if choice.addr == instruction.ip:
return True
return False
def jump_match(
self,
old_state: SimState,
choice: SimState,
previous_instruction: Instruction,
instruction: Instruction,
) -> bool:
if choice.addr == instruction.ip:
l.debug("jump 0%x -> 0%x", previous_instruction.ip, choice.addr)
return True
return False
def repair_satness(self, old_state: SimState, new_state: SimState) -> None:
if not new_state.solver.satisfiable():
new_state.solver._stored_solver = old_state.solver._solver.branch()
if not self.debug_unsat:
self.debug_sat = old_state
self.debug_unsat = new_state
def repair_ip_at_syscall(self, old_block: Block, new_state: SimState) -> None:
capstone = old_block.capstone
first_ins = capstone.insns[0].insn
ins_repr = first_ins.mnemonic
if ins_repr.startswith("syscall"):
new_state.regs.ip_at_syscall = new_state.ip
def post_execute(
self, old_state: SimState, old_block: Block, state: SimState
) -> None:
self.repair_satness(old_state, state)
self.repair_ip_at_syscall(old_block, state)
def execute(
self,
state: SimState,
previous_instruction: Instruction,
instruction: Instruction,
index: int,
) -> Tuple[SimState, SimState]:
self.debug_state.append(state)
state_block = state.block() # type: Block
force_jump, force_type = self.repair_jump_ins(
state, state_block, previous_instruction, instruction
)
self.repair_alloca_ins(state, state_block)
try:
step = self.project.factory.successors(
state, num_inst=1 # , force_addr=addr
)
step = repair_syscall_jump(state_block, step)
step = self.repair_func_resolver(state, step)
step = self.repair_exit_handler(state, step)
except Exception:
logging.exception("Error while finding successor for {}".format(self.name))
new_state = state.copy()
new_state.regs.ip = instruction.ip
self.post_execute(state, state_block, new_state)
return state, new_state
if force_jump:
new_state = state.copy()
if force_type == "call":
if not self.project.is_hooked(instruction.ip):
new_state.regs.rsp -= 8
ret_addr = state.addr + state_block.capstone.insns[0].size
new_state.memory.store(
new_state.regs.rsp, ret_addr, endness="Iend_LE"
)
elif force_type == "ret":
new_state.regs.rsp += 8
new_state.regs.ip = instruction.ip
choices = [new_state]
else:
choices = step.successors + step.unsat_successors
old_state = state
l.info(repr(state) + " " + repr(previous_instruction) + " " + repr(instruction))
for choice in choices:
# HACKS: if ip is symbolic
try:
if self.last_match(choice, instruction):
return choice, choice
if self.jump_match(
old_state, choice, previous_instruction, instruction
):
self.post_execute(old_state, state_block, choice)
return old_state, choice
except angr.SimValueError:
logging.exception("Error while jumping in {}".format(self.name))
pass
new_state = state.copy()
new_state.regs.ip = instruction.ip
return state, new_state
def valid_address(self, address: int) -> bool:
return self.project.loader.find_object_containing(address)
def run(self) -> StateManager:
simstate = self.start_state
states = StateManager(self, len(self.trace) + 1)
states.add_major(State(0, None, self.trace[0], None, simstate))
self.debug_unsat = None # type: Optional[SimState]
self.debug_state = deque(maxlen=50) # type: deque
self.skip_addr = {} # type: Dict[int, int]
cnt = -1
interval = max(1, len(self.trace) // 200)
length = len(self.trace) - 1
l.info("start processing trace")
progress_log = ProgressLog(
name="process trace of {}".format(self.name),
total_steps=len(self.trace),
log_frequency=int(1e3),
kill_limit=60 * 60 * 24,
)
# prev_instr.ip == state.ip
for previous_idx in range(len(self.trace) - 1):
previous_instruction = self.trace[previous_idx]
if previous_idx + 1 >= len(self.trace):
self.instruction = self.trace[previous_idx]
else:
self.instruction = self.trace[previous_idx + 1]
cnt += 1
progress_log.update(cnt)
if not cnt % 500:
gc.collect()
assert self.valid_address(self.instruction.ip)
old_simstate, new_simstate = self.execute(
simstate, previous_instruction, self.instruction, cnt
)
simstate = new_simstate
if cnt % interval == 0 or length - cnt < 15:
states.add_major(
State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
)
if (
self.project.loader.find_object_containing(self.instruction.ip)
== self.project.loader.main_object
):
states.last_main_state = State(
cnt,
previous_instruction,
self.instruction,
old_simstate,
new_simstate,
)
constrain_registers(states.major_states[-1], self.coredump)
return states
| en | 000661692_efeslab-hase_tracer_20ccb368c3b8.py | unknown | 5,726 |
import json
import time
import torch
import random
import numpy as np
from pprint import pprint
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.stacking.datasets import get_out_of_folds_data, StackingDataset
from src.stacking.transforms import get_transforms
from src.stacking.argus_models import StackingModel
from src import config
EXPERIMENT_NAME = 'fcnet_stacking_rs_004'
START_FROM = 0
EXPERIMENTS = [
'auxiliary_007',
'auxiliary_010',
'auxiliary_012',
'auxiliary_014'
]
DATASET_SIZE = 128 * 256
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 4
SAVE_DIR = config.experiments_dir / EXPERIMENT_NAME
def train_folds(save_dir, folds_data):
random_params = {
'base_size': int(np.random.choice([64, 128, 256, 512])),
'reduction_scale': int(np.random.choice([2, 4, 8, 16])),
'p_dropout': float(np.random.uniform(0.0, 0.5)),
'lr': float(np.random.uniform(0.0001, 0.00001)),
'patience': int(np.random.randint(3, 12)),
'factor': float(np.random.uniform(0.5, 0.8)),
'batch_size': int(np.random.choice([32, 64, 128])),
}
pprint(random_params)
save_dir.mkdir(parents=True, exist_ok=True)
with open(save_dir / 'random_params.json', 'w') as outfile:
json.dump(random_params, outfile)
params = {
'nn_module': ('FCNet', {
'in_channels': len(config.classes) * len(EXPERIMENTS),
'num_classes': len(config.classes),
'base_size': random_params['base_size'],
'reduction_scale': random_params['reduction_scale'],
'p_dropout': random_params['p_dropout']
}),
'loss': 'BCEWithLogitsLoss',
'optimizer': ('Adam', {'lr': random_params['lr']}),
'device': 'cuda',
}
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = save_dir / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_dataset = StackingDataset(folds_data, train_folds,
get_transforms(True),
DATASET_SIZE)
val_dataset = StackingDataset(folds_data, val_folds,
get_transforms(False))
train_loader = DataLoader(train_dataset,
batch_size=random_params['batch_size'],
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset,
batch_size=random_params['batch_size'] * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = StackingModel(params)
callbacks = [
MonitorCheckpoint(save_fold_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap',
patience=random_params['patience'],
factor=random_params['factor'],
min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=20),
LoggingToFile(save_fold_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=300,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
SAVE_DIR.mkdir(parents=True, exist_ok=True)
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
if CORRECTIONS:
with open(config.corrections_json_path) as file:
corrections = json.load(file)
print("Corrections:", corrections)
else:
corrections = None
folds_data = get_out_of_folds_data(EXPERIMENTS, corrections)
for num in range(START_FROM, 10000):
np.random.seed(num)
random.seed(num)
save_dir = SAVE_DIR / f'{num:04}'
train_folds(save_dir, folds_data)
time.sleep(5.0)
torch.cuda.empty_cache()
time.sleep(5.0)
| import json
import time
import torch
import random
import numpy as np
from pprint import pprint
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.stacking.datasets import get_out_of_folds_data, StackingDataset
from src.stacking.transforms import get_transforms
from src.stacking.argus_models import StackingModel
from src import config
EXPERIMENT_NAME = 'fcnet_stacking_rs_004'
START_FROM = 0
EXPERIMENTS = [
'auxiliary_007',
'auxiliary_010',
'auxiliary_012',
'auxiliary_014'
]
DATASET_SIZE = 128 * 256
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 4
SAVE_DIR = config.experiments_dir / EXPERIMENT_NAME
def train_folds(save_dir, folds_data):
random_params = {
'base_size': int(np.random.choice([64, 128, 256, 512])),
'reduction_scale': int(np.random.choice([2, 4, 8, 16])),
'p_dropout': float(np.random.uniform(0.0, 0.5)),
'lr': float(np.random.uniform(0.0001, 0.00001)),
'patience': int(np.random.randint(3, 12)),
'factor': float(np.random.uniform(0.5, 0.8)),
'batch_size': int(np.random.choice([32, 64, 128])),
}
pprint(random_params)
save_dir.mkdir(parents=True, exist_ok=True)
with open(save_dir / 'random_params.json', 'w') as outfile:
json.dump(random_params, outfile)
params = {
'nn_module': ('FCNet', {
'in_channels': len(config.classes) * len(EXPERIMENTS),
'num_classes': len(config.classes),
'base_size': random_params['base_size'],
'reduction_scale': random_params['reduction_scale'],
'p_dropout': random_params['p_dropout']
}),
'loss': 'BCEWithLogitsLoss',
'optimizer': ('Adam', {'lr': random_params['lr']}),
'device': 'cuda',
}
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = save_dir / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_dataset = StackingDataset(folds_data, train_folds,
get_transforms(True),
DATASET_SIZE)
val_dataset = StackingDataset(folds_data, val_folds,
get_transforms(False))
train_loader = DataLoader(train_dataset,
batch_size=random_params['batch_size'],
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset,
batch_size=random_params['batch_size'] * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = StackingModel(params)
callbacks = [
MonitorCheckpoint(save_fold_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap',
patience=random_params['patience'],
factor=random_params['factor'],
min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=20),
LoggingToFile(save_fold_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=300,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
SAVE_DIR.mkdir(parents=True, exist_ok=True)
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
if CORRECTIONS:
with open(config.corrections_json_path) as file:
corrections = json.load(file)
print("Corrections:", corrections)
else:
corrections = None
folds_data = get_out_of_folds_data(EXPERIMENTS, corrections)
for num in range(START_FROM, 10000):
np.random.seed(num)
random.seed(num)
save_dir = SAVE_DIR / f'{num:04}'
train_folds(save_dir, folds_data)
time.sleep(5.0)
torch.cuda.empty_cache()
time.sleep(5.0)
| en | 000354174_wubinbai-argus-freesound_stacking_random_search_7c250aa8a89c.py | unknown | 1,380 |
"""Add onboarding email fields to user
Revision ID: 2c6aaada8bff
Revises: f4a49acd8801
Create Date: 2021-05-02 12:25:35.640366
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2c6aaada8bff"
down_revision = "f4a49acd8801"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("users", sa.Column("last_onboarding_email_sent", sa.DateTime(timezone=True), nullable=True))
op.add_column("users", sa.Column("onboarding_emails_sent", sa.Integer(), server_default="0", nullable=False))
op.add_column("users", sa.Column("added_to_mailing_list", sa.Boolean(), server_default="false", nullable=False))
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'send_onboarding_emails'")
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'add_users_to_email_list'")
def downgrade():
op.drop_column("users", "added_to_mailing_list")
op.drop_column("users", "onboarding_emails_sent")
op.drop_column("users", "last_onboarding_email_sent")
| """Add onboarding email fields to user
Revision ID: 2c6aaada8bff
Revises: f4a49acd8801
Create Date: 2021-05-02 12:25:35.640366
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2c6aaada8bff"
down_revision = "f4a49acd8801"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("users", sa.Column("last_onboarding_email_sent", sa.DateTime(timezone=True), nullable=True))
op.add_column("users", sa.Column("onboarding_emails_sent", sa.Integer(), server_default="0", nullable=False))
op.add_column("users", sa.Column("added_to_mailing_list", sa.Boolean(), server_default="false", nullable=False))
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'send_onboarding_emails'")
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'add_users_to_email_list'")
def downgrade():
op.drop_column("users", "added_to_mailing_list")
op.drop_column("users", "onboarding_emails_sent")
op.drop_column("users", "last_onboarding_email_sent")
| en | 000485161_foormea-couchers_2c6aaada8bff_add_onboarding_email_fields_to_user_ee3c4a3b27c7.py | unknown | 371 |
from clld.web.adapters.geojson import GeoJson, get_lonlat
from clld.web.maps import Map, ParameterMap, Layer
class LanguagesMap(Map):
def get_options(self):
return {'icon_size': 20, 'no_showlabels': True}
class SegmentMap(ParameterMap):
def get_options(self):
return {'icon_size': 20}
class InventoryMap(Map):
def get_options(self):
return {'icon_size': 20}
def get_layers(self):
yield Layer(
self.ctx.id,
self.ctx.name,
GeoJson(self.ctx).render(self.ctx.language, self.req, dump=False))
def get_default_options(self):
return {
'center': list(reversed(get_lonlat(self.ctx.language) or [0, 0])),
'zoom': 3,
'no_popup': True,
'no_link': True,
'sidebar': True}
def includeme(config):
config.register_map('languages', LanguagesMap)
config.register_map('parameter', SegmentMap)
config.register_map('contribution', InventoryMap)
| from clld.web.adapters.geojson import GeoJson, get_lonlat
from clld.web.maps import Map, ParameterMap, Layer
class LanguagesMap(Map):
def get_options(self):
return {'icon_size': 20, 'no_showlabels': True}
class SegmentMap(ParameterMap):
def get_options(self):
return {'icon_size': 20}
class InventoryMap(Map):
def get_options(self):
return {'icon_size': 20}
def get_layers(self):
yield Layer(
self.ctx.id,
self.ctx.name,
GeoJson(self.ctx).render(self.ctx.language, self.req, dump=False))
def get_default_options(self):
return {
'center': list(reversed(get_lonlat(self.ctx.language) or [0, 0])),
'zoom': 3,
'no_popup': True,
'no_link': True,
'sidebar': True}
def includeme(config):
config.register_map('languages', LanguagesMap)
config.register_map('parameter', SegmentMap)
config.register_map('contribution', InventoryMap)
| en | 000558051_ltxom-phoible_maps_810b6a6d0cc4.py | unknown | 313 |
############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import json
import time
import uuid
from json import JSONDecodeError
from typing import Optional
from pygls.lsp.methods import (COMPLETION, TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN)
from pygls.lsp.types import (CompletionItem, CompletionList, CompletionOptions,
CompletionParams, ConfigurationItem,
ConfigurationParams, Diagnostic,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams, MessageType, Position,
Range, Registration, RegistrationParams,
Unregistration, UnregistrationParams)
from pygls.lsp.types.basic_structures import (WorkDoneProgressBegin,
WorkDoneProgressEnd,
WorkDoneProgressReport)
from pygls.server import LanguageServer
COUNT_DOWN_START_IN_SECONDS = 10
COUNT_DOWN_SLEEP_IN_SECONDS = 1
class JsonLanguageServer(LanguageServer):
CMD_COUNT_DOWN_BLOCKING = 'countDownBlocking'
CMD_COUNT_DOWN_NON_BLOCKING = 'countDownNonBlocking'
CMD_PROGRESS = 'progress'
CMD_REGISTER_COMPLETIONS = 'registerCompletions'
CMD_SHOW_CONFIGURATION_ASYNC = 'showConfigurationAsync'
CMD_SHOW_CONFIGURATION_CALLBACK = 'showConfigurationCallback'
CMD_SHOW_CONFIGURATION_THREAD = 'showConfigurationThread'
CMD_UNREGISTER_COMPLETIONS = 'unregisterCompletions'
CONFIGURATION_SECTION = 'jsonServer'
def __init__(self):
super().__init__()
json_server = JsonLanguageServer()
def _validate(ls, params):
ls.show_message_log('Validating json...')
text_doc = ls.workspace.get_document(params.text_document.uri)
source = text_doc.source
diagnostics = _validate_json(source) if source else []
ls.publish_diagnostics(text_doc.uri, diagnostics)
def _validate_json(source):
"""Validates json file."""
diagnostics = []
try:
json.loads(source)
except JSONDecodeError as err:
msg = err.msg
col = err.colno
line = err.lineno
d = Diagnostic(
range=Range(
start=Position(line=line - 1, character=col - 1),
end=Position(line=line - 1, character=col)
),
message=msg,
source=type(json_server).__name__
)
diagnostics.append(d)
return diagnostics
@json_server.feature(COMPLETION, CompletionOptions(trigger_characters=[',']))
def completions(params: Optional[CompletionParams] = None) -> CompletionList:
"""Returns completion items."""
return CompletionList(
is_incomplete=False,
items=[
CompletionItem(label='"'),
CompletionItem(label='['),
CompletionItem(label=']'),
CompletionItem(label='{'),
CompletionItem(label='}'),
]
)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_BLOCKING)
def count_down_10_seconds_blocking(ls, *args):
"""Starts counting down and showing message synchronously.
It will `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
time.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_NON_BLOCKING)
async def count_down_10_seconds_non_blocking(ls, *args):
"""Starts counting down and showing message asynchronously.
It won't `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
await asyncio.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
"""Text document did change notification."""
_validate(ls, params)
@json_server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(server: JsonLanguageServer, params: DidCloseTextDocumentParams):
"""Text document did close notification."""
server.show_message('Text Document Did Close')
@json_server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
"""Text document did open notification."""
ls.show_message('Text Document Did Open')
_validate(ls, params)
@json_server.command(JsonLanguageServer.CMD_PROGRESS)
async def progress(ls: JsonLanguageServer, *args):
"""Create and start the progress on the client."""
token = 'token'
# Create
await ls.progress.create_async(token)
# Begin
ls.progress.begin(token, WorkDoneProgressBegin(title='Indexing', percentage=0))
# Report
for i in range(1, 10):
ls.progress.report(
token,
WorkDoneProgressReport(message=f'{i * 10}%', percentage= i * 10),
)
await asyncio.sleep(2)
# End
ls.progress.end(token, WorkDoneProgressEnd(message='Finished'))
@json_server.command(JsonLanguageServer.CMD_REGISTER_COMPLETIONS)
async def register_completions(ls: JsonLanguageServer, *args):
"""Register completions method on the client."""
params = RegistrationParams(registrations=[
Registration(
id=str(uuid.uuid4()),
method=COMPLETION,
register_options={"triggerCharacters": "[':']"})
])
response = await ls.register_capability_async(params)
if response is None:
ls.show_message('Successfully registered completions method')
else:
ls.show_message('Error happened during completions registration.',
MessageType.Error)
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_ASYNC)
async def show_configuration_async(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using coroutines."""
try:
config = await ls.get_configuration_async(
ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]))
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_CALLBACK)
def show_configuration_callback(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using callback."""
def _config_callback(config):
try:
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]), _config_callback)
@json_server.thread()
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_THREAD)
def show_configuration_thread(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using thread pool."""
try:
config = ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
])).result(2)
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_UNREGISTER_COMPLETIONS)
async def unregister_completions(ls: JsonLanguageServer, *args):
"""Unregister completions method on the client."""
params = UnregistrationParams(unregisterations=[
Unregistration(id=str(uuid.uuid4()), method=COMPLETION)
])
response = await ls.unregister_capability_async(params)
if response is None:
ls.show_message('Successfully unregistered completions method')
else:
ls.show_message('Error happened during completions unregistration.',
MessageType.Error)
| ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import json
import time
import uuid
from json import JSONDecodeError
from typing import Optional
from pygls.lsp.methods import (COMPLETION, TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN)
from pygls.lsp.types import (CompletionItem, CompletionList, CompletionOptions,
CompletionParams, ConfigurationItem,
ConfigurationParams, Diagnostic,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams, MessageType, Position,
Range, Registration, RegistrationParams,
Unregistration, UnregistrationParams)
from pygls.lsp.types.basic_structures import (WorkDoneProgressBegin,
WorkDoneProgressEnd,
WorkDoneProgressReport)
from pygls.server import LanguageServer
COUNT_DOWN_START_IN_SECONDS = 10
COUNT_DOWN_SLEEP_IN_SECONDS = 1
class JsonLanguageServer(LanguageServer):
CMD_COUNT_DOWN_BLOCKING = 'countDownBlocking'
CMD_COUNT_DOWN_NON_BLOCKING = 'countDownNonBlocking'
CMD_PROGRESS = 'progress'
CMD_REGISTER_COMPLETIONS = 'registerCompletions'
CMD_SHOW_CONFIGURATION_ASYNC = 'showConfigurationAsync'
CMD_SHOW_CONFIGURATION_CALLBACK = 'showConfigurationCallback'
CMD_SHOW_CONFIGURATION_THREAD = 'showConfigurationThread'
CMD_UNREGISTER_COMPLETIONS = 'unregisterCompletions'
CONFIGURATION_SECTION = 'jsonServer'
def __init__(self):
super().__init__()
json_server = JsonLanguageServer()
def _validate(ls, params):
ls.show_message_log('Validating json...')
text_doc = ls.workspace.get_document(params.text_document.uri)
source = text_doc.source
diagnostics = _validate_json(source) if source else []
ls.publish_diagnostics(text_doc.uri, diagnostics)
def _validate_json(source):
"""Validates json file."""
diagnostics = []
try:
json.loads(source)
except JSONDecodeError as err:
msg = err.msg
col = err.colno
line = err.lineno
d = Diagnostic(
range=Range(
start=Position(line=line - 1, character=col - 1),
end=Position(line=line - 1, character=col)
),
message=msg,
source=type(json_server).__name__
)
diagnostics.append(d)
return diagnostics
@json_server.feature(COMPLETION, CompletionOptions(trigger_characters=[',']))
def completions(params: Optional[CompletionParams] = None) -> CompletionList:
"""Returns completion items."""
return CompletionList(
is_incomplete=False,
items=[
CompletionItem(label='"'),
CompletionItem(label='['),
CompletionItem(label=']'),
CompletionItem(label='{'),
CompletionItem(label='}'),
]
)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_BLOCKING)
def count_down_10_seconds_blocking(ls, *args):
"""Starts counting down and showing message synchronously.
It will `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
time.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.command(JsonLanguageServer.CMD_COUNT_DOWN_NON_BLOCKING)
async def count_down_10_seconds_non_blocking(ls, *args):
"""Starts counting down and showing message asynchronously.
It won't `block` the main thread, which can be tested by trying to show
completion items.
"""
for i in range(COUNT_DOWN_START_IN_SECONDS):
ls.show_message(f'Counting down... {COUNT_DOWN_START_IN_SECONDS - i}')
await asyncio.sleep(COUNT_DOWN_SLEEP_IN_SECONDS)
@json_server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
"""Text document did change notification."""
_validate(ls, params)
@json_server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(server: JsonLanguageServer, params: DidCloseTextDocumentParams):
"""Text document did close notification."""
server.show_message('Text Document Did Close')
@json_server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
"""Text document did open notification."""
ls.show_message('Text Document Did Open')
_validate(ls, params)
@json_server.command(JsonLanguageServer.CMD_PROGRESS)
async def progress(ls: JsonLanguageServer, *args):
"""Create and start the progress on the client."""
token = 'token'
# Create
await ls.progress.create_async(token)
# Begin
ls.progress.begin(token, WorkDoneProgressBegin(title='Indexing', percentage=0))
# Report
for i in range(1, 10):
ls.progress.report(
token,
WorkDoneProgressReport(message=f'{i * 10}%', percentage= i * 10),
)
await asyncio.sleep(2)
# End
ls.progress.end(token, WorkDoneProgressEnd(message='Finished'))
@json_server.command(JsonLanguageServer.CMD_REGISTER_COMPLETIONS)
async def register_completions(ls: JsonLanguageServer, *args):
"""Register completions method on the client."""
params = RegistrationParams(registrations=[
Registration(
id=str(uuid.uuid4()),
method=COMPLETION,
register_options={"triggerCharacters": "[':']"})
])
response = await ls.register_capability_async(params)
if response is None:
ls.show_message('Successfully registered completions method')
else:
ls.show_message('Error happened during completions registration.',
MessageType.Error)
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_ASYNC)
async def show_configuration_async(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using coroutines."""
try:
config = await ls.get_configuration_async(
ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]))
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_CALLBACK)
def show_configuration_callback(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using callback."""
def _config_callback(config):
try:
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
]), _config_callback)
@json_server.thread()
@json_server.command(JsonLanguageServer.CMD_SHOW_CONFIGURATION_THREAD)
def show_configuration_thread(ls: JsonLanguageServer, *args):
"""Gets exampleConfiguration from the client settings using thread pool."""
try:
config = ls.get_configuration(ConfigurationParams(items=[
ConfigurationItem(
scope_uri='',
section=JsonLanguageServer.CONFIGURATION_SECTION)
])).result(2)
example_config = config[0].get('exampleConfiguration')
ls.show_message(f'jsonServer.exampleConfiguration value: {example_config}')
except Exception as e:
ls.show_message_log(f'Error ocurred: {e}')
@json_server.command(JsonLanguageServer.CMD_UNREGISTER_COMPLETIONS)
async def unregister_completions(ls: JsonLanguageServer, *args):
"""Unregister completions method on the client."""
params = UnregistrationParams(unregisterations=[
Unregistration(id=str(uuid.uuid4()), method=COMPLETION)
])
response = await ls.unregister_capability_async(params)
if response is None:
ls.show_message('Successfully unregistered completions method')
else:
ls.show_message('Error happened during completions unregistration.',
MessageType.Error)
| en | 000330104_DillanCMills-pygls_server_bdbe47f7cf8d.py | unknown | 2,576 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import re
import datetime
import scrapy
from scrapy import Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity, Compose, MapCompose, TakeFirst, Join
from dateutil.parser import parse as dateutil_parse
from w3lib.html import remove_tags
def num_page_extractor(num_pages):
if num_pages:
return num_pages.split()[0]
return None
def safe_parse_date(date):
try:
date = dateutil_parse(date, fuzzy=True, default=datetime.datetime.min)
date = date.strftime("%Y-%m-%d %H:%M:%S")
except ValueError:
date = None
return date
def extract_publish_dates(maybe_dates):
maybe_dates = [s for s in maybe_dates if "published" in s.lower()]
return [safe_parse_date(date) for date in maybe_dates]
def extract_year(s):
s = s.lower().strip()
match = re.match(".*first published.*(\d{4})", s)
if match:
return match.group(1)
def extract_ratings(txt):
"""Extract the rating histogram from embedded Javascript code
The embedded code looks like this:
|----------------------------------------------------------|
| renderRatingGraph([6, 3, 2, 2, 1]); |
| if ($('rating_details')) { |
| $('rating_details').insert({top: $('rating_graph')}) |
| } |
|----------------------------------------------------------|
"""
codelines = "".join(txt).split(";")
rating_code = [line.strip() for line in codelines if "renderRatingGraph" in line]
if not rating_code:
return None
rating_code = rating_code[0]
rating_array = rating_code[rating_code.index("[") + 1 : rating_code.index("]")]
ratings = {5 - i:int(x) for i, x in enumerate(rating_array.split(","))}
return ratings
def filter_asin(asin):
if asin and len(str(asin)) == 10:
return asin
return None
def isbn_filter(isbn):
if isbn and len(str(isbn)) == 10 and isbn.isdigit():
return isbn
def isbn13_filter(isbn):
if isbn and len(str(isbn)) == 13 and isbn.isdigit():
return isbn
def filter_empty(vals):
return [v.strip() for v in vals if v.strip()]
def split_by_newline(txt):
return txt.split("\n")
class BookItem(scrapy.Item):
# Scalars
url = Field()
title = Field(input_processor=MapCompose(str.strip))
author = Field(input_processor=MapCompose(str.strip))
num_ratings = Field(input_processor=MapCompose(str.strip, int))
num_reviews = Field(input_processor=MapCompose(str.strip, int))
avg_rating = Field(input_processor=MapCompose(str.strip, float))
num_pages = Field(input_processor=MapCompose(str.strip, num_page_extractor, int))
language = Field(input_processor=MapCompose(str.strip))
publish_date = Field(input_processor=extract_publish_dates)
original_publish_year = Field(input_processor=MapCompose(extract_year, int))
isbn = Field(input_processor=MapCompose(str.strip, isbn_filter))
isbn13 = Field(input_processor=MapCompose(str.strip, isbn13_filter))
asin = Field(input_processor=MapCompose(filter_asin))
series = Field()
# Lists
awards = Field(output_processor=Identity())
places = Field(output_processor=Identity())
characters = Field(output_processor=Identity())
genres = Field(output_processor=Compose(set, list))
# Dicts
rating_histogram = Field(input_processor=MapCompose(extract_ratings))
class BookLoader(ItemLoader):
default_output_processor = TakeFirst()
class AuthorItem(scrapy.Item):
# Scalars
url = Field()
name = Field()
birth_date = Field(input_processor=MapCompose(safe_parse_date))
death_date = Field(input_processor=MapCompose(safe_parse_date))
avg_rating = Field(serializer=float)
num_ratings = Field(serializer=int)
num_reviews = Field(serializer=int)
# Lists
genres = Field(output_processor=Compose(set, list))
influences = Field(output_processor=Compose(set, list))
# Blobs
about = Field(
# Take the first match, remove HTML tags, convert to list of lines, remove empty lines, remove the "edit data" prefix
input_processor=Compose(TakeFirst(), remove_tags, split_by_newline, filter_empty, lambda s: s[1:]),
output_processor=Join()
)
class AuthorLoader(ItemLoader):
default_output_processor = TakeFirst()
| # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import re
import datetime
import scrapy
from scrapy import Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Identity, Compose, MapCompose, TakeFirst, Join
from dateutil.parser import parse as dateutil_parse
from w3lib.html import remove_tags
def num_page_extractor(num_pages):
if num_pages:
return num_pages.split()[0]
return None
def safe_parse_date(date):
try:
date = dateutil_parse(date, fuzzy=True, default=datetime.datetime.min)
date = date.strftime("%Y-%m-%d %H:%M:%S")
except ValueError:
date = None
return date
def extract_publish_dates(maybe_dates):
maybe_dates = [s for s in maybe_dates if "published" in s.lower()]
return [safe_parse_date(date) for date in maybe_dates]
def extract_year(s):
s = s.lower().strip()
match = re.match(".*first published.*(\d{4})", s)
if match:
return match.group(1)
def extract_ratings(txt):
"""Extract the rating histogram from embedded Javascript code
The embedded code looks like this:
|----------------------------------------------------------|
| renderRatingGraph([6, 3, 2, 2, 1]); |
| if ($('rating_details')) { |
| $('rating_details').insert({top: $('rating_graph')}) |
| } |
|----------------------------------------------------------|
"""
codelines = "".join(txt).split(";")
rating_code = [line.strip() for line in codelines if "renderRatingGraph" in line]
if not rating_code:
return None
rating_code = rating_code[0]
rating_array = rating_code[rating_code.index("[") + 1 : rating_code.index("]")]
ratings = {5 - i:int(x) for i, x in enumerate(rating_array.split(","))}
return ratings
def filter_asin(asin):
if asin and len(str(asin)) == 10:
return asin
return None
def isbn_filter(isbn):
if isbn and len(str(isbn)) == 10 and isbn.isdigit():
return isbn
def isbn13_filter(isbn):
if isbn and len(str(isbn)) == 13 and isbn.isdigit():
return isbn
def filter_empty(vals):
return [v.strip() for v in vals if v.strip()]
def split_by_newline(txt):
return txt.split("\n")
class BookItem(scrapy.Item):
# Scalars
url = Field()
title = Field(input_processor=MapCompose(str.strip))
author = Field(input_processor=MapCompose(str.strip))
num_ratings = Field(input_processor=MapCompose(str.strip, int))
num_reviews = Field(input_processor=MapCompose(str.strip, int))
avg_rating = Field(input_processor=MapCompose(str.strip, float))
num_pages = Field(input_processor=MapCompose(str.strip, num_page_extractor, int))
language = Field(input_processor=MapCompose(str.strip))
publish_date = Field(input_processor=extract_publish_dates)
original_publish_year = Field(input_processor=MapCompose(extract_year, int))
isbn = Field(input_processor=MapCompose(str.strip, isbn_filter))
isbn13 = Field(input_processor=MapCompose(str.strip, isbn13_filter))
asin = Field(input_processor=MapCompose(filter_asin))
series = Field()
# Lists
awards = Field(output_processor=Identity())
places = Field(output_processor=Identity())
characters = Field(output_processor=Identity())
genres = Field(output_processor=Compose(set, list))
# Dicts
rating_histogram = Field(input_processor=MapCompose(extract_ratings))
class BookLoader(ItemLoader):
default_output_processor = TakeFirst()
class AuthorItem(scrapy.Item):
# Scalars
url = Field()
name = Field()
birth_date = Field(input_processor=MapCompose(safe_parse_date))
death_date = Field(input_processor=MapCompose(safe_parse_date))
avg_rating = Field(serializer=float)
num_ratings = Field(serializer=int)
num_reviews = Field(serializer=int)
# Lists
genres = Field(output_processor=Compose(set, list))
influences = Field(output_processor=Compose(set, list))
# Blobs
about = Field(
# Take the first match, remove HTML tags, convert to list of lines, remove empty lines, remove the "edit data" prefix
input_processor=Compose(TakeFirst(), remove_tags, split_by_newline, filter_empty, lambda s: s[1:]),
output_processor=Join()
)
class AuthorLoader(ItemLoader):
default_output_processor = TakeFirst()
| en | 000778511_havanagrawal-GoodreadsScraper_items_38eb78c97a7d.py | unknown | 1,400 |
# coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.tasks.fixed.vit."""
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.fixed import mlp_mixer
tasks = [
'MLPMixer_Cifar100_bs256_tiny16',
'MLPMixer_Cifar100_small16',
'MLPMixer_Cifar100_tiny16',
'MLPMixer_Food101_64_bs256_tiny16',
'MLPMixer_Food101_64_small16',
'MLPMixer_Food101_64_tiny16',
'MLPMixer_ImageNet64_bs256_tiny16',
'MLPMixer_ImageNet64_small16',
'MLPMixer_ImageNet64_tiny16',
]
class MLPMixerTest(parameterized.TestCase):
@parameterized.parameters(tasks)
def test_tasks(self, task_name):
task = getattr(mlp_mixer, task_name)()
test_utils.smoketest_task(task)
if __name__ == '__main__':
absltest.main()
| # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learned_optimizers.tasks.fixed.vit."""
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.fixed import mlp_mixer
tasks = [
'MLPMixer_Cifar100_bs256_tiny16',
'MLPMixer_Cifar100_small16',
'MLPMixer_Cifar100_tiny16',
'MLPMixer_Food101_64_bs256_tiny16',
'MLPMixer_Food101_64_small16',
'MLPMixer_Food101_64_tiny16',
'MLPMixer_ImageNet64_bs256_tiny16',
'MLPMixer_ImageNet64_small16',
'MLPMixer_ImageNet64_tiny16',
]
class MLPMixerTest(parameterized.TestCase):
@parameterized.parameters(tasks)
def test_tasks(self, task_name):
task = getattr(mlp_mixer, task_name)()
test_utils.smoketest_task(task)
if __name__ == '__main__':
absltest.main()
| en | 000090906_google-learned_optimization_mlp_mixer_test_0de2bfbcf571.py | unknown | 499 |
import os
import time
import uuid
from smbprotocol.connection import Connection
def test_connection(server, port):
conn = Connection(uuid.uuid4(), server, port=port)
print("Opening connection to %s:%d" % (server, port))
conn.connect(timeout=5)
conn.disconnect(True)
if __name__ == '__main__':
server = os.environ.get("SMB_SERVER", "127.0.0.1")
port = int(os.environ.get("SMB_PORT", 445))
print("Waiting for SMB server to be online")
attempt = 1
total_attempts = 20
while attempt < total_attempts:
print("Starting attempt %d" % attempt)
try:
test_connection(server, port)
break
except Exception as e:
print("Connection attempt %d failed: %s" % (attempt, str(e)))
attempt += 1
if attempt == total_attempts:
raise Exception("Timeout while waiting for SMB server to come "
"online")
print("Sleeping for 5 seconds before next attempt")
time.sleep(5)
print("Connection successful")
| import os
import time
import uuid
from smbprotocol.connection import Connection
def test_connection(server, port):
conn = Connection(uuid.uuid4(), server, port=port)
print("Opening connection to %s:%d" % (server, port))
conn.connect(timeout=5)
conn.disconnect(True)
if __name__ == '__main__':
server = os.environ.get("SMB_SERVER", "127.0.0.1")
port = int(os.environ.get("SMB_PORT", 445))
print("Waiting for SMB server to be online")
attempt = 1
total_attempts = 20
while attempt < total_attempts:
print("Starting attempt %d" % attempt)
try:
test_connection(server, port)
break
except Exception as e:
print("Connection attempt %d failed: %s" % (attempt, str(e)))
attempt += 1
if attempt == total_attempts:
raise Exception("Timeout while waiting for SMB server to come "
"online")
print("Sleeping for 5 seconds before next attempt")
time.sleep(5)
print("Connection successful")
| en | 000623938_wokis-smbprotocol_check-smb_1e52cef5d4b6.py | unknown | 304 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Config file of the Gophish command line interface.
@author: Martin Dubé
@organization: Gosecure inc.
@license: MIT License
@contact: mdube@gosecure.ca
Copyright (c) 2017, Gosecure
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
#
# Step 1: Gophish configuration
#
# Just the basic configuration for basic features
#
API_KEY = ''
API_URL = 'http://127.0.0.1:3333'
#
# Step 2: Campaign configuration
#
# Information regarding your campaign. Most comes from the gophish WebUI.
CAMPAIGN_NAME = 'John Doe'
CAMPAIGN_URL = 'https://path.toyourwebsite.com'
# NOTE(review): WORKING_DIR is concatenated directly with bare file names
# below (EMAILS_PATH here, RESULTS_PATH/CREDS_PATH/... later), so the value
# must end with a trailing '/' — the placeholder shown here does not.
WORKING_DIR = '/path/to/working/dir'
EMAILS_PATH = WORKING_DIR + 'emails.txt'
# Landing Pages
LP_NAME = 'Landing Page Name'
# Two specific fields required by --print-creds to properly parse the JSON payloads.
# Update the fields based on your landing pages user and password fields.
LP_USER_FIELD = 'cUser'
LP_PWD_FIELD = 'cPass'
# Email Template
ET_NAME = 'Email Template Name'
# Sending Profiles
SP_NAME = 'Sending Profile Name'
# Batch Management Settings
GROUP_SIZE = 50
START_INTERVAL = 1 # Unit = minutes. Default=1. Increase when you have more than 10 batch.
BATCH_INTERVAL = 1 # Unit = minutes
# Verify TLS when testing credentials
# Default is True
VERIFY_TLS = True
# Owa login testing settings
OWA_DOMAIN = 'DOMAIN'
OWA_SERVER = 'outlook.example.com'
# Netscaler login testing settings
NETSCALER_SERVER = 'vpn.example.com'
# Juniper (Secure Access SSL VPN)
JUNIPER_DOMAIN = 'DOMAIN'
JUNIPER_SERVER = 'vpn.example.com'
# HINT: Consider verifying the URI as some organizations have multiple
# URIs which are 2FA or 1FA. The default one is often 2FA.
# For istance, /url/ can become /url_XX/, where XX is a number.
JUNIPER_URI = '/dana-na/auth/url/login.cgi'
# HINT: Find it in the source code of the login page. Look for a hidden
# input field named "realm".
JUNIPER_REALM = 'bla'
#
# Step 3: Things that should not change for most users
#
FILE_DATE_FMT = '%Y%m%d_%H%M%S'
FILE_DATE = datetime.datetime.now().strftime(FILE_DATE_FMT)
CAMPAIGN_NAME_TPL = '%s - Group %i'
CAMPAIGN_PREFIX = CAMPAIGN_NAME_TPL[:-2] % CAMPAIGN_NAME
RESULTS_PATH = WORKING_DIR + 'campaign_results_%s.csv' % CAMPAIGN_NAME
CREDS_PATH = WORKING_DIR + 'campaign_creds_%s_%s.csv' % (FILE_DATE, CAMPAIGN_NAME)
JSON_PATH = WORKING_DIR + 'campaign_raw_%s.json' % CAMPAIGN_NAME
GEOIP_PATH = WORKING_DIR + 'campaign_geoip_%s.csv' % CAMPAIGN_NAME
# Reporting
EXCLUDED_IP = []
GOPHISH_HOST = ''
GOPHISH_SSH_PORT = 22
GOPHISH_SSH_USER = 'root'
GOPHISH_SSH_PASS = None
GOPHISH_SSH_KEY = '/path/to/key'
GOPHISH_SSH_KEY_PASSPHRASE = 'some_pass'
# Gophish timestamps are in UTC. This will put dates as this timezone.
GOPHISH_TIMEZONE = "America/Toronto"
APACHE_HOST = GOPHISH_HOST
APACHE_SSH_PORT = GOPHISH_SSH_PORT
APACHE_SSH_USER = GOPHISH_SSH_USER
APACHE_SSH_PASS = GOPHISH_SSH_PASS
APACHE_SSH_KEY = GOPHISH_SSH_KEY
APACHE_SSH_KEY_PASSPHRASE = GOPHISH_SSH_KEY_PASSPHRASE
APACHE_LOGS_FOLDER = '/var/log/apache2/'
APACHE_LOGS_PREFIX = 'path.toyourwebsite.com'
# Take if from /etc/apache2/apache2.conf. The line starts with LogFormat. Currently using the "combined" one.
APACHE_LOGS_FORMAT = "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\""
APACHE_MALWARE_NAME = 'malware.zip'
EMPIRE_API_URL = 'https://127.0.0.1:1337'
EMPIRE_API_KEY = 'some_key'
SENDGRID_API_KEY = 'some_key'
#
# By default, we disable SSL verification as gophish uses a self-signed cert.
#
import gophish.client
import requests
from requests.packages import urllib3
class GophishClient(gophish.client.GophishClient):
    """Gophish REST client that skips TLS certificate verification.

    Gophish serves its admin API with a self-signed certificate, so every
    request is issued with ``verify=False``.
    """

    def __init__(self, api_key, host, **kwargs):
        super(GophishClient, self).__init__(api_key, host, **kwargs)

    def execute(self, method, path, **kwargs):
        """Execute a request against ``path`` and return the raw response."""
        url = "{}{}".format(self.host, path)
        kwargs.update(self._client_kwargs)
        return requests.request(method,
                                url,
                                params={"api_key": self.api_key},
                                verify=False,
                                **kwargs)
# Just to remove a SubjectAltNameWarning.
urllib3.disable_warnings()
#
# Step 4: Advanced TLS settings
#
#
#
# Uncomment to configure TLS Client certificates or other TLS settings.
#
#
#import ssl
#import gophish.client
#from requests import Session
#from requests.adapters import HTTPAdapter
#from requests.packages.urllib3.poolmanager import PoolManager
#from requests.packages import urllib3
#
#class TLSHttpAdapter(HTTPAdapter):
# '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
#
# def init_poolmanager(self, connections, maxsize, block=False):
# self.poolmanager = PoolManager(num_pools=connections,
# maxsize=maxsize,
# block=block,
# ssl_version=ssl.PROTOCOL_TLSv1_2,
# cert_reqs='CERT_REQUIRED')
#
#class GophishClient(gophish.client.GophishClient):
# """ A standard HTTP REST client used by Gophish """
# def __init__(self, api_key, host, cert_file=None, ca_file=None, **kwargs):
# super(GophishClient, self).__init__(api_key, host, **kwargs)
# self.session = Session()
# self.session.mount(API_URL, TLSHttpAdapter())
# self.cert_file = '/path/to/client_cert.pem'
# self.ca_file = '/path/to/root_ca.crt'
#
# def execute(self, method, path, **kwargs):
# """ Executes a request to a given endpoint, returning the result """
#
# url = "{}{}".format(self.host, path)
# kwargs.update(self._client_kwargs)
# response = self.session.request(method, url, params={"api_key": self.api_key},
# cert=(self.cert_file), verify=self.ca_file, **kwargs)
# return response
#
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Config file of the Gophish command line interface.
@author: Martin Dubé
@organization: Gosecure inc.
@license: MIT License
@contact: mdube@gosecure.ca
Copyright (c) 2017, Gosecure
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
#
# Step 1: Gophish configuration
#
# Just the basic configuration for basic features
#
API_KEY = ''
API_URL = 'http://127.0.0.1:3333'
#
# Step 2: Campaign configuration
#
# Information regarding your campaign. Most comes from the gophish WebUI.
CAMPAIGN_NAME = 'John Doe'
CAMPAIGN_URL = 'https://path.toyourwebsite.com'
WORKING_DIR = '/path/to/working/dir'
EMAILS_PATH = WORKING_DIR + 'emails.txt'
# Landing Pages
LP_NAME = 'Landing Page Name'
# Two specific fields required by --print-creds to properly parse the JSON payloads.
# Update the fields based on your landing pages user and password fields.
LP_USER_FIELD = 'cUser'
LP_PWD_FIELD = 'cPass'
# Email Template
ET_NAME = 'Email Template Name'
# Sending Profiles
SP_NAME = 'Sending Profile Name'
# Batch Management Settings
GROUP_SIZE = 50
START_INTERVAL = 1 # Unit = minutes. Default=1. Increase when you have more than 10 batch.
BATCH_INTERVAL = 1 # Unit = minutes
# Verify TLS when testing credentials
# Default is True
VERIFY_TLS = True
# Owa login testing settings
OWA_DOMAIN = 'DOMAIN'
OWA_SERVER = 'outlook.example.com'
# Netscaler login testing settings
NETSCALER_SERVER = 'vpn.example.com'
# Juniper (Secure Access SSL VPN)
JUNIPER_DOMAIN = 'DOMAIN'
JUNIPER_SERVER = 'vpn.example.com'
# HINT: Consider verifying the URI as some organizations have multiple
# URIs which are 2FA or 1FA. The default one is often 2FA.
# For istance, /url/ can become /url_XX/, where XX is a number.
JUNIPER_URI = '/dana-na/auth/url/login.cgi'
# HINT: Find it in the source code of the login page. Look for a hidden
# input field named "realm".
JUNIPER_REALM = 'bla'
#
# Step 3: Things that should not change for most users
#
FILE_DATE_FMT = '%Y%m%d_%H%M%S'
FILE_DATE = datetime.datetime.now().strftime(FILE_DATE_FMT)
CAMPAIGN_NAME_TPL = '%s - Group %i'
CAMPAIGN_PREFIX = CAMPAIGN_NAME_TPL[:-2] % CAMPAIGN_NAME
RESULTS_PATH = WORKING_DIR + 'campaign_results_%s.csv' % CAMPAIGN_NAME
CREDS_PATH = WORKING_DIR + 'campaign_creds_%s_%s.csv' % (FILE_DATE, CAMPAIGN_NAME)
JSON_PATH = WORKING_DIR + 'campaign_raw_%s.json' % CAMPAIGN_NAME
GEOIP_PATH = WORKING_DIR + 'campaign_geoip_%s.csv' % CAMPAIGN_NAME
# Reporting
EXCLUDED_IP = []
GOPHISH_HOST = ''
GOPHISH_SSH_PORT = 22
GOPHISH_SSH_USER = 'root'
GOPHISH_SSH_PASS = None
GOPHISH_SSH_KEY = '/path/to/key'
GOPHISH_SSH_KEY_PASSPHRASE = 'some_pass'
# Gophish timestamps are in UTC. This will put dates as this timezone.
GOPHISH_TIMEZONE = "America/Toronto"
APACHE_HOST = GOPHISH_HOST
APACHE_SSH_PORT = GOPHISH_SSH_PORT
APACHE_SSH_USER = GOPHISH_SSH_USER
APACHE_SSH_PASS = GOPHISH_SSH_PASS
APACHE_SSH_KEY = GOPHISH_SSH_KEY
APACHE_SSH_KEY_PASSPHRASE = GOPHISH_SSH_KEY_PASSPHRASE
APACHE_LOGS_FOLDER = '/var/log/apache2/'
APACHE_LOGS_PREFIX = 'path.toyourwebsite.com'
# Take if from /etc/apache2/apache2.conf. The line starts with LogFormat. Currently using the "combined" one.
APACHE_LOGS_FORMAT = "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\""
APACHE_MALWARE_NAME = 'malware.zip'
EMPIRE_API_URL = 'https://127.0.0.1:1337'
EMPIRE_API_KEY = 'some_key'
SENDGRID_API_KEY = 'some_key'
#
# By default, we disable SSL verification as gophish uses a self-signed cert.
#
import gophish.client
import requests
from requests.packages import urllib3
class GophishClient(gophish.client.GophishClient):
""" A standard HTTP REST client used by Gophish """
def __init__(self, api_key, host, **kwargs):
super(GophishClient, self).__init__(api_key, host, **kwargs)
def execute(self, method, path, **kwargs):
""" Executes a request to a given endpoint, returning the result """
url = "{}{}".format(self.host, path)
kwargs.update(self._client_kwargs)
response = requests.request(
method, url, params={"api_key": self.api_key}, verify=False, **kwargs)
return response
# Just to remove a SubjectAltNameWarning.
urllib3.disable_warnings()
#
# Step 4: Advanced TLS settings
#
#
#
# Uncomment to configure TLS Client certificates or other TLS settings.
#
#
#import ssl
#import gophish.client
#from requests import Session
#from requests.adapters import HTTPAdapter
#from requests.packages.urllib3.poolmanager import PoolManager
#from requests.packages import urllib3
#
#class TLSHttpAdapter(HTTPAdapter):
# '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
#
# def init_poolmanager(self, connections, maxsize, block=False):
# self.poolmanager = PoolManager(num_pools=connections,
# maxsize=maxsize,
# block=block,
# ssl_version=ssl.PROTOCOL_TLSv1_2,
# cert_reqs='CERT_REQUIRED')
#
#class GophishClient(gophish.client.GophishClient):
# """ A standard HTTP REST client used by Gophish """
# def __init__(self, api_key, host, cert_file=None, ca_file=None, **kwargs):
# super(GophishClient, self).__init__(api_key, host, **kwargs)
# self.session = Session()
# self.session.mount(API_URL, TLSHttpAdapter())
# self.cert_file = '/path/to/client_cert.pem'
# self.ca_file = '/path/to/root_ca.crt'
#
# def execute(self, method, path, **kwargs):
# """ Executes a request to a given endpoint, returning the result """
#
# url = "{}{}".format(self.host, path)
# kwargs.update(self._client_kwargs)
# response = self.session.request(method, url, params={"api_key": self.api_key},
# cert=(self.cert_file), verify=self.ca_file, **kwargs)
# return response
#
| en | 000616520_ninostephen-gophish-cli_config.default_532b62ae482d.py | unknown | 2,260 |
import tensorflow as tf
import utils.utils as utils
class SemanticCNN:
    """Kim-(2014)-style sentence-classification CNN built as a TF1 graph:
    embedding lookup -> three parallel conv branches (filter widths 3/4/5)
    -> max-over-time pooling -> dropout -> softmax classifier."""

    def __init__(self, config,
                 sequence_length, vocab_size, embedding_size, num_filters):
        # config: ConfigParser-like object; options are read from 'main'.
        self.config = config
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.num_filters = num_filters
        # The literal string 'None' in the config means "no fixed RNG seed".
        if config.get('main', 'seed') == 'None':
            self.seed = None
        else:
            self.seed = config.getint('main', 'seed')

    def conv2d(self, data, weight):
        # VALID convolution with unit strides (no padding).
        return tf.nn.conv2d(data,
                            weight,
                            strides=[1, 1, 1, 1],
                            padding='VALID')

    def max_pool(self, data, filter_size):
        # Max-over-time pooling: the window spans the whole sequence axis
        # remaining after a width-`filter_size` VALID convolution.
        return tf.nn.max_pool(data,
                              ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                              strides=[1, 1, 1, 1],
                              padding='VALID')

    def variable(self, flavor, shape):
        """Create a variable of the requested flavor; None if unknown."""
        if flavor == 'W_truncated_normal':
            return tf.Variable(
                tf.truncated_normal(shape,
                                    stddev=0.1,
                                    seed=self.seed,
                                    dtype=tf.float32))
        elif flavor == 'W_random_uniform':
            # Used for the embedding table: uniform in [-1, 1).
            return tf.Variable(
                tf.random_uniform(shape,
                                  minval=-1.0,
                                  maxval=1.0))
        elif flavor == 'b':
            return tf.Variable(tf.constant(0.1, shape=shape),
                               dtype=tf.float32)
        else:
            return None

    def train_input_placeholders(self):
        """Return (x, y_) placeholders for token ids and one-hot labels."""
        # NOTE(review): x carries integer token ids but is declared float32
        # and cast back to int32 inside model() — confirm this is intended.
        x = tf.placeholder(tf.float32,
                           shape=[None, self.sequence_length],
                           name="x")
        y_ = tf.placeholder(tf.float32,
                            [None, self.config.getint('main', 'num_classes')], name="y_")
        return x, y_

    def model(self, data):
        """Build the graph. Returns (y_conv, logits, keep_prob, l2_loss,
        embedded_words, embed_W)."""
        l2_loss = tf.constant(0.0)
        keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        embed_W = self.variable('W_random_uniform', [self.vocab_size, self.embedding_size])
        embedded_words = tf.nn.embedding_lookup(embed_W, tf.cast(data, tf.int32))
        # Add a trailing channels dim so conv2d sees (batch, seq, embed, 1).
        embedded_words_expanded = tf.expand_dims(embedded_words, -1)
        # Branch 1: filter width 3.
        filter3_shape = [3, self.embedding_size, 1, self.num_filters]
        pool_filter3_W = self.variable('W_truncated_normal', filter3_shape)
        pool_filter3_b = self.variable('b', [self.num_filters])
        conv1 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter3_W), pool_filter3_b))
        pool_filter3 = self.max_pool(conv1, 3)
        # Branch 2: filter width 4.
        filter4_shape = [4, self.embedding_size, 1, self.num_filters]
        pool_filter4_W = self.variable('W_truncated_normal', filter4_shape)
        pool_filter4_b = self.variable('b', [self.num_filters])
        conv2 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter4_W), pool_filter4_b))
        pool_filter4 = self.max_pool(conv2, 4)
        # Branch 3: filter width 5.
        filter5_shape = [5, self.embedding_size, 1, self.num_filters]
        pool_filter5_W = self.variable('W_truncated_normal', filter5_shape)
        pool_filter5_b = self.variable('b', [self.num_filters])
        conv3 = tf.nn.relu(tf.nn.bias_add(
            self.conv2d(embedded_words_expanded, pool_filter5_W), pool_filter5_b))
        pool_filter5 = self.max_pool(conv3, 5)
        # NOTE(review): axis-first tf.concat(3, values) is the pre-TF-1.0
        # signature; TF >= 1.0 expects tf.concat(values, 3) — confirm the
        # TensorFlow version this targets.
        pool_combined = tf.concat(3, [pool_filter3, pool_filter4, pool_filter5])
        pool_final = tf.reshape(pool_combined, [-1, self.num_filters * 3])
        dropout = tf.nn.dropout(pool_final, keep_prob)
        final_W = tf.get_variable("W", shape=[self.num_filters * 3,
                                              self.config.getint('main', 'num_classes')],
                                  initializer=tf.contrib.layers.xavier_initializer())
        final_b = tf.Variable(tf.constant(0.1,
                                          shape=[self.config.getint('main', 'num_classes')]), name="b")
        logits = tf.matmul(dropout, final_W) + final_b
        y_conv = tf.nn.softmax(logits)
        # Track L2 of the output layer only; the caller may add it to the loss.
        l2_loss += tf.nn.l2_loss(final_W) + tf.nn.l2_loss(final_b)
        return y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W
| import tensorflow as tf
import utils.utils as utils
class SemanticCNN:
def __init__(self, config,
sequence_length, vocab_size, embedding_size, num_filters):
self.config = config
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.num_filters = num_filters
if config.get('main', 'seed') == 'None':
self.seed = None
else:
self.seed = config.getint('main', 'seed')
def conv2d(self, data, weight):
return tf.nn.conv2d(data,
weight,
strides=[1, 1, 1, 1],
padding='VALID')
def max_pool(self, data, filter_size):
return tf.nn.max_pool(data,
ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID')
def variable(self, flavor, shape):
if flavor == 'W_truncated_normal':
return tf.Variable(
tf.truncated_normal(shape,
stddev=0.1,
seed=self.seed,
dtype=tf.float32))
elif flavor == 'W_random_uniform':
return tf.Variable(
tf.random_uniform(shape,
minval=-1.0,
maxval=1.0))
elif flavor == 'b':
return tf.Variable(tf.constant(0.1, shape=shape),
dtype=tf.float32)
else:
return None
def train_input_placeholders(self):
x = tf.placeholder(tf.float32,
shape=[None, self.sequence_length],
name="x")
y_ = tf.placeholder(tf.float32,
[None, self.config.getint('main', 'num_classes')], name="y_")
return x, y_
def model(self, data):
l2_loss = tf.constant(0.0)
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
embed_W = self.variable('W_random_uniform', [self.vocab_size, self.embedding_size])
embedded_words = tf.nn.embedding_lookup(embed_W, tf.cast(data, tf.int32))
embedded_words_expanded = tf.expand_dims(embedded_words, -1)
filter3_shape = [3, self.embedding_size, 1, self.num_filters]
pool_filter3_W = self.variable('W_truncated_normal', filter3_shape)
pool_filter3_b = self.variable('b', [self.num_filters])
conv1 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter3_W), pool_filter3_b))
pool_filter3 = self.max_pool(conv1, 3)
filter4_shape = [4, self.embedding_size, 1, self.num_filters]
pool_filter4_W = self.variable('W_truncated_normal', filter4_shape)
pool_filter4_b = self.variable('b', [self.num_filters])
conv2 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter4_W), pool_filter4_b))
pool_filter4 = self.max_pool(conv2, 4)
filter5_shape = [5, self.embedding_size, 1, self.num_filters]
pool_filter5_W = self.variable('W_truncated_normal', filter5_shape)
pool_filter5_b = self.variable('b', [self.num_filters])
conv3 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter5_W), pool_filter5_b))
pool_filter5 = self.max_pool(conv3, 5)
pool_combined = tf.concat(3, [pool_filter3, pool_filter4, pool_filter5])
pool_final = tf.reshape(pool_combined, [-1, self.num_filters * 3])
dropout = tf.nn.dropout(pool_final, keep_prob)
final_W = tf.get_variable("W", shape=[self.num_filters * 3,
self.config.getint('main', 'num_classes')],
initializer=tf.contrib.layers.xavier_initializer())
final_b = tf.Variable(tf.constant(0.1,
shape=[self.config.getint('main', 'num_classes')]), name="b")
logits = tf.matmul(dropout, final_W) + final_b
y_conv = tf.nn.softmax(logits)
l2_loss += tf.nn.l2_loss(final_W) + tf.nn.l2_loss(final_b)
return y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W
| en | 000118625_macdaliot-deep-pwning_semantic_cnn_1fd9fe9205eb.py | unknown | 1,354 |
from typing import Optional, Dict, Callable
import torch
# This file contains various physical constants and functions to convert units
# from the atomic units
__all__ = ["length_to", "time_to", "freq_to", "ir_ints_to", "raman_ints_to",
"edipole_to", "equadrupole_to"]
# 1 atomic unit in SI
LENGTH = 5.29177210903e-11 # m
TIME = 2.4188843265857e-17 # s
CHARGE = 1.602176634e-19 # C
# 1 atomic unit in other unit
DEBYE = 2.541746473 # Debye (for dipole)
ANGSTROM = LENGTH / 1e-10 # angstrom (length)
AMU = 5.485799090649e-4 # atomic mass unit (mass)
# constants in SI
LIGHT_SPEED = 2.99792458e8 # m/s
# scales
ATTO = 1e-15
FEMTO = 1e-12
NANO = 1e-9
MICRO = 1e-6
MILLI = 1e-3
CENTI = 1e-2
DECI = 1e-1
KILO = 1e3
MEGA = 1e6
GIGA = 1e9
TERA = 1e12
PhysVarType = torch.Tensor
UnitType = Optional[str]
_length_converter = {
"angst": ANGSTROM,
"angstrom": ANGSTROM,
"m": LENGTH,
"cm": LENGTH / CENTI,
}
_freq_converter = {
"cm-1": CENTI / TIME / LIGHT_SPEED,
"cm^-1": CENTI / TIME / LIGHT_SPEED,
"hz": 1.0 / TIME,
"khz": 1.0 / TIME / KILO,
"mhz": 1.0 / TIME / MEGA,
"ghz": 1.0 / TIME / GIGA,
"thz": 1.0 / TIME / TERA,
}
_ir_ints_converter = {
"(debye/angst)^2/amu": (DEBYE / ANGSTROM) ** 2 / AMU,
"km/mol": (DEBYE / ANGSTROM) ** 2 / AMU * 42.256, # from https://dx.doi.org/10.1002%2Fjcc.24344
}
_raman_ints_converter = {
"angst^4/amu": ANGSTROM ** 4 / AMU,
}
_time_converter = {
"s": TIME,
"us": TIME / MICRO,
"ns": TIME / NANO,
"fs": TIME / FEMTO,
}
_edipole_converter = {
"d": DEBYE,
"debye": DEBYE,
"c*m": DEBYE, # Coulomb meter
}
_equadrupole_converter = {
"debye*angst": DEBYE * ANGSTROM # Debye angstrom
}
def _avail_keys(converter: Dict[str, float]) -> str:
# returns the available keys in a string of list of string
return str(list(_length_converter.keys()))
def _add_docstr_to(phys: str, converter: Dict[str, float]) -> Callable:
    """Decorator factory: attach a standard conversion docstring for *phys*."""
    def decorator(func: Callable):
        # `phys` and the converter's key list are baked into the docstring
        # at decoration time.
        func.__doc__ = f"""
        Convert the {phys} from atomic unit to the given unit.
        Available units are (case-insensitive): {_avail_keys(converter)}
        """
        return func
    return decorator
@_add_docstr_to("time", _time_converter)
def time_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit time from atomic unit to the given unit
return _converter_to(a, unit, _time_converter)
@_add_docstr_to("frequency", _freq_converter)
def freq_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit frequency from atomic unit to the given unit
return _converter_to(a, unit, _freq_converter)
@_add_docstr_to("IR intensity", _ir_ints_converter)
def ir_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _ir_ints_converter)
@_add_docstr_to("Raman intensity", _raman_ints_converter)
def raman_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _raman_ints_converter)
@_add_docstr_to("length", _length_converter)
def length_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit length from atomic unit to the given unit
return _converter_to(a, unit, _length_converter)
@_add_docstr_to("electric dipole", _edipole_converter)
def edipole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _edipole_converter)
@_add_docstr_to("electric quadrupole", _equadrupole_converter)
def equadrupole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _equadrupole_converter)
def _converter_to(a: PhysVarType, unit: UnitType, converter: Dict[str, float]) -> PhysVarType:
    """Convert *a* from atomic units using the factor table *converter*."""
    # No unit requested: the value stays in atomic units.
    if unit is None:
        return a
    key = unit.lower()
    if key not in converter:
        avail_units = _avail_keys(converter)
        raise ValueError(f"Unknown unit: {unit}. Available units are: {avail_units}")
    return a * converter[key]
| from typing import Optional, Dict, Callable
import torch
# This file contains various physical constants and functions to convert units
# from the atomic units
__all__ = ["length_to", "time_to", "freq_to", "ir_ints_to", "raman_ints_to",
"edipole_to", "equadrupole_to"]
# 1 atomic unit in SI
LENGTH = 5.29177210903e-11 # m
TIME = 2.4188843265857e-17 # s
CHARGE = 1.602176634e-19 # C
# 1 atomic unit in other unit
DEBYE = 2.541746473 # Debye (for dipole)
ANGSTROM = LENGTH / 1e-10 # angstrom (length)
AMU = 5.485799090649e-4 # atomic mass unit (mass)
# constants in SI
LIGHT_SPEED = 2.99792458e8 # m/s
# scales
ATTO = 1e-15
FEMTO = 1e-12
NANO = 1e-9
MICRO = 1e-6
MILLI = 1e-3
CENTI = 1e-2
DECI = 1e-1
KILO = 1e3
MEGA = 1e6
GIGA = 1e9
TERA = 1e12
PhysVarType = torch.Tensor
UnitType = Optional[str]
_length_converter = {
"angst": ANGSTROM,
"angstrom": ANGSTROM,
"m": LENGTH,
"cm": LENGTH / CENTI,
}
_freq_converter = {
"cm-1": CENTI / TIME / LIGHT_SPEED,
"cm^-1": CENTI / TIME / LIGHT_SPEED,
"hz": 1.0 / TIME,
"khz": 1.0 / TIME / KILO,
"mhz": 1.0 / TIME / MEGA,
"ghz": 1.0 / TIME / GIGA,
"thz": 1.0 / TIME / TERA,
}
_ir_ints_converter = {
"(debye/angst)^2/amu": (DEBYE / ANGSTROM) ** 2 / AMU,
"km/mol": (DEBYE / ANGSTROM) ** 2 / AMU * 42.256, # from https://dx.doi.org/10.1002%2Fjcc.24344
}
_raman_ints_converter = {
"angst^4/amu": ANGSTROM ** 4 / AMU,
}
_time_converter = {
"s": TIME,
"us": TIME / MICRO,
"ns": TIME / NANO,
"fs": TIME / FEMTO,
}
_edipole_converter = {
"d": DEBYE,
"debye": DEBYE,
"c*m": DEBYE, # Coulomb meter
}
_equadrupole_converter = {
"debye*angst": DEBYE * ANGSTROM # Debye angstrom
}
def _avail_keys(converter: Dict[str, float]) -> str:
# returns the available keys in a string of list of string
return str(list(_length_converter.keys()))
def _add_docstr_to(phys: str, converter: Dict[str, float]) -> Callable:
# automatically add docstring for converter functions
def decorator(callable: Callable):
callable.__doc__ = f"""
Convert the {phys} from atomic unit to the given unit.
Available units are (case-insensitive): {_avail_keys(converter)}
"""
return callable
return decorator
@_add_docstr_to("time", _time_converter)
def time_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit time from atomic unit to the given unit
return _converter_to(a, unit, _time_converter)
@_add_docstr_to("frequency", _freq_converter)
def freq_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit frequency from atomic unit to the given unit
return _converter_to(a, unit, _freq_converter)
@_add_docstr_to("IR intensity", _ir_ints_converter)
def ir_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _ir_ints_converter)
@_add_docstr_to("Raman intensity", _raman_ints_converter)
def raman_ints_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit IR intensity from atomic unit to the given unit
return _converter_to(a, unit, _raman_ints_converter)
@_add_docstr_to("length", _length_converter)
def length_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit length from atomic unit to the given unit
return _converter_to(a, unit, _length_converter)
@_add_docstr_to("electric dipole", _edipole_converter)
def edipole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _edipole_converter)
@_add_docstr_to("electric quadrupole", _equadrupole_converter)
def equadrupole_to(a: PhysVarType, unit: UnitType) -> PhysVarType:
# convert unit electric dipole from atomic unit to the given unit
return _converter_to(a, unit, _equadrupole_converter)
def _converter_to(a: PhysVarType, unit: UnitType, converter: Dict[str, float]) -> PhysVarType:
# converter from the atomic unit
if unit is None:
return a
u = unit.lower()
try:
return a * converter[u]
except KeyError:
avail_units = _avail_keys(converter)
raise ValueError(f"Unknown unit: {unit}. Available units are: {avail_units}")
| en | 000696965_Jaikinator-dqc_units_ac11863adb40.py | unknown | 1,662 |
# Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add guest_data table
Revision ID: 9fe6423022c2
Revises: fd8bc05fbc78
Create Date: 2021-01-25 19:08:22.522260+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9fe6423022c2'
down_revision = 'fd8bc05fbc78'
branch_labels = None
depends_on = None
def upgrade():
    """Create guest_data: one row of per-event extra data per guest."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('guest_data',
                    sa.Column('id', postgresql.UUID(), nullable=False),
                    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
                    sa.Column('guest_id', postgresql.UUID(), nullable=False),
                    sa.Column('event_id', postgresql.UUID(), nullable=False),
                    sa.Column('dietary_restrictions',
                              sa.Text(), nullable=False),
                    # Rows are removed automatically when the referenced
                    # event or guest is deleted (ON DELETE CASCADE).
                    sa.ForeignKeyConstraint(['event_id'], [
                        'events.id'], name='guest_data_event_id_fkey', ondelete='CASCADE'),
                    sa.ForeignKeyConstraint(['guest_id'], [
                        'guests.id'], name='guest_data_guest_id_fkey', ondelete='CASCADE'),
                    sa.PrimaryKeyConstraint('id', name='guest_data_id_pkey')
                    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the guest_data table (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('guest_data')
    # ### end Alembic commands ###
| # Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add guest_data table
Revision ID: 9fe6423022c2
Revises: fd8bc05fbc78
Create Date: 2021-01-25 19:08:22.522260+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '9fe6423022c2'
down_revision = 'fd8bc05fbc78'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('guest_data',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
sa.Column('guest_id', postgresql.UUID(), nullable=False),
sa.Column('event_id', postgresql.UUID(), nullable=False),
sa.Column('dietary_restrictions',
sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['event_id'], [
'events.id'], name='guest_data_event_id_fkey', ondelete='CASCADE'),
sa.ForeignKeyConstraint(['guest_id'], [
'guests.id'], name='guest_data_guest_id_fkey', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name='guest_data_id_pkey')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('guest_data')
# ### end Alembic commands ###
| en | 000317035_lazytype-ent_9fe6423022c2_202112519822_add_guest_data_table_de6e03b1f754.py | unknown | 511 |
import pickle
import pdb
from onconet.utils.c_index import get_censoring_dist
# Error template and the global registry mapping dataset name -> class.
NO_DATASET_ERR = "Dataset {} not in DATASET_REGISTRY! Available datasets are {}"
DATASET_REGISTRY = {}


def RegisterDataset(dataset_name):
    """Registers a dataset."""
    def _register(dataset_cls):
        DATASET_REGISTRY[dataset_name] = dataset_cls
        return dataset_cls
    return _register
def get_dataset_class(args):
    """Look up args.dataset in DATASET_REGISTRY; raise if unregistered."""
    if args.dataset not in DATASET_REGISTRY:
        # Callers catch plain Exception here; keep the type as-is.
        raise Exception(
            NO_DATASET_ERR.format(args.dataset, DATASET_REGISTRY.keys()))
    return DATASET_REGISTRY[args.dataset]
def build_path_to_hidden_dict(args):
    """Load precomputed hidden vectors and index them by image path.

    Args:
        args: namespace whose ``hiddens_results_path`` points to a pickle
            mapping '{split}_hiddens' -> (hiddens, paths) for the
            train/dev/test splits.

    Returns:
        Tuple of (dict mapping path -> hidden vector, hidden dimension).
        The dimension is read from the first vector of the last split
        processed; assumes each split is non-empty -- TODO confirm.
    """
    # Fix: close the pickle file deterministically (the original relied on
    # the garbage collector to close the handle returned by open()).
    with open(args.hiddens_results_path, 'rb') as pickle_file:
        res = pickle.load(pickle_file)
    path_to_hidden = {}
    for split in ['train', 'dev', 'test']:
        hiddens, paths = res['{}_hiddens'.format(split)]
        for indx, path in enumerate(paths):
            path_to_hidden[path] = hiddens[indx]
    print("Built path to hidden dict with {} paths, of dim: {}".format(len(path_to_hidden), hiddens[0].shape[0]))
    return path_to_hidden, hiddens[0].shape[0]
def get_dataset(args, transformers, test_transformers):
    """Build the (train, dev, test) dataset objects selected by ``args.dataset``.

    Args:
        args: experiment namespace; several bookkeeping dicts are attached to
            it as a side effect (partition map, exam-to-year/device maps, and
            hidden-dimension fields when precomputed hiddens are used).
        transformers: transforms applied to the train split.
        test_transformers: transforms applied to the dev and test splits.

    Returns:
        Tuple (train, dev, test) of dataset instances.
    """
    dataset_class = get_dataset_class(args)
    if args.ten_fold_cross_val or args.use_precomputed_hiddens:
        args.patient_to_partition_dict = {}
    if args.use_precomputed_hiddens:
        path_to_hidden_dict, args.hidden_dim = build_path_to_hidden_dict(args)
        if args.force_input_dim:
            # Truncate every hidden vector to its first input_dim entries.
            args.hidden_dim = args.input_dim
            path_to_hidden_dict = (lambda input_dim, path_to_hidden_dict : {k:v[:input_dim] for k,v in path_to_hidden_dict.items()})(args.input_dim, path_to_hidden_dict)
        args.precomputed_hidden_dim = args.hidden_dim
    args.exam_to_year_dict = {}
    args.exam_to_device_dict = {}
    train = dataset_class(args, transformers, 'train')
    dev = dataset_class(args, test_transformers, 'dev')
    test = dataset_class(args, test_transformers, 'test')
    if args.survival_analysis_setup:
        # Estimate censoring from train when non-empty, otherwise from test
        # (e.g. evaluation-only runs).
        args.censoring_distribution = get_censoring_dist(train if len(train) > 0 else test)
    if args.use_precomputed_hiddens:
        # All three splits share the same path -> hidden-vector lookup table.
        train.path_to_hidden_dict = path_to_hidden_dict
        dev.path_to_hidden_dict = path_to_hidden_dict
        test.path_to_hidden_dict = path_to_hidden_dict
    return train, dev, test
| import pickle
import pdb
from onconet.utils.c_index import get_censoring_dist
# Template for the error raised when an unknown dataset name is requested.
NO_DATASET_ERR = "Dataset {} not in DATASET_REGISTRY! Available datasets are {}"
# Global registry mapping dataset name -> dataset class, populated by the
# @RegisterDataset decorator at import time.
DATASET_REGISTRY = {}
def RegisterDataset(dataset_name):
    """Registers a dataset class under ``dataset_name`` (class decorator)."""
    def decorator(f):
        # Record the decorated class and hand it back unchanged.
        DATASET_REGISTRY[dataset_name] = f
        return f
    return decorator
def get_dataset_class(args):
    """Return the dataset class registered under ``args.dataset``.

    Raises:
        Exception: with a descriptive message when the name is unknown.
    """
    if args.dataset not in DATASET_REGISTRY:
        raise Exception(
            NO_DATASET_ERR.format(args.dataset, DATASET_REGISTRY.keys()))
    return DATASET_REGISTRY[args.dataset]
def build_path_to_hidden_dict(args):
    """Load precomputed hidden vectors and index them by image path.

    Returns a (path -> hidden vector) dict plus the hidden dimension, read
    from the first vector of the last split processed; assumes each split
    is non-empty -- TODO confirm.
    """
    # NOTE(review): the file handle is never closed explicitly; closing is
    # left to the garbage collector.
    res = pickle.load(open(args.hiddens_results_path,'rb'))
    path_to_hidden = {}
    for split in ['train','dev','test']:
        hiddens, paths = res['{}_hiddens'.format(split)]
        for indx, path in enumerate(paths):
            path_to_hidden[path] = hiddens[indx]
    print("Built path to hidden dict with {} paths, of dim: {}".format(len(path_to_hidden), hiddens[0].shape[0]))
    return path_to_hidden, hiddens[0].shape[0]
def get_dataset(args, transformers, test_transformers):
    """Build the (train, dev, test) dataset objects selected by ``args.dataset``.

    Args:
        args: experiment namespace; several bookkeeping dicts are attached to
            it as a side effect (partition map, exam-to-year/device maps, and
            hidden-dimension fields when precomputed hiddens are used).
        transformers: transforms applied to the train split.
        test_transformers: transforms applied to the dev and test splits.

    Returns:
        Tuple (train, dev, test) of dataset instances.
    """
    dataset_class = get_dataset_class(args)
    if args.ten_fold_cross_val or args.use_precomputed_hiddens:
        args.patient_to_partition_dict = {}
    if args.use_precomputed_hiddens:
        path_to_hidden_dict, args.hidden_dim = build_path_to_hidden_dict(args)
        if args.force_input_dim:
            # Truncate every hidden vector to its first input_dim entries.
            args.hidden_dim = args.input_dim
            path_to_hidden_dict = (lambda input_dim, path_to_hidden_dict : {k:v[:input_dim] for k,v in path_to_hidden_dict.items()})(args.input_dim, path_to_hidden_dict)
        args.precomputed_hidden_dim = args.hidden_dim
    args.exam_to_year_dict = {}
    args.exam_to_device_dict = {}
    train = dataset_class(args, transformers, 'train')
    dev = dataset_class(args, test_transformers, 'dev')
    test = dataset_class(args, test_transformers, 'test')
    if args.survival_analysis_setup:
        # Estimate censoring from train when non-empty, otherwise from test
        # (e.g. evaluation-only runs).
        args.censoring_distribution = get_censoring_dist(train if len(train) > 0 else test)
    if args.use_precomputed_hiddens:
        # All three splits share the same path -> hidden-vector lookup table.
        train.path_to_hidden_dict = path_to_hidden_dict
        dev.path_to_hidden_dict = path_to_hidden_dict
        test.path_to_hidden_dict = path_to_hidden_dict
    return train, dev, test
| en | 000153257_harrivle-Mirai_factory_bfbc756459e6.py | unknown | 777 |
#!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
""" Epic-Kitchens dataset. """
import os
import random
import torch
import torch.utils.data
import utils.logging as logging
import time
import oss2 as oss
from torchvision.transforms import Compose
import torchvision.transforms._transforms_video as transforms
import torch.nn.functional as F
from datasets.utils.transformations import (
ColorJitter,
KineticsResizedCrop
)
from datasets.base.base_dataset import BaseVideoDataset
from datasets.utils.random_erasing import RandomErasing
import utils.bucket as bu
from datasets.base.builder import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Epickitchen100(BaseVideoDataset):
    """EPIC-KITCHENS-100 video dataset with verb/noun action labels."""
    def __init__(self, cfg, split):
        """Initialize the dataset for the given config and split name."""
        super(Epickitchen100, self).__init__(cfg, split)
        # Evaluation (test/submission) outside of pretraining needs per-sample
        # spatial-crop configuration; see _pre_transformation_config below.
        if (self.split == "test" or self.split == "submission") and self.cfg.PRETRAIN.ENABLE == False:
            self._pre_transformation_config_required = True
    def _get_dataset_list_name(self):
        """
        Returns the annotation list file name for the current split.
        Returns:
            dataset_list_name (str)
        """
        if self.split == "train":
            if self.cfg.TRAIN.TRAIN_VAL_COMBINE:
                train_list = "train_val"
            else:
                train_list = "train"
        # NOTE(review): both "val" and "test" splits resolve to the
        # "validation" list here (labelled test annotations are withheld);
        # "submission" uses the unlabelled test-timestamps file. Confirm
        # this is intentional for the "test" split.
        name = "EPIC_100_{}.csv".format(
            train_list if self.split == "train" else "validation" if not self.split == "submission" else "test_timestamps",
        )
        logger.info("Reading video list from file: {}".format(name))
        return name
    def _get_sample_info(self, index):
        """
        Returns the sample info corresponding to the index.
        Args:
            index (int): target index
        Returns:
            sample_info (dict): contains different information to be used later
                "name": the name of the video
                "path": the path of the video for the specified index
                "supervised_label": verb/noun label(s) of the video
        """
        if not self.split == "submission":
            # Columns 10 and 12 of the annotation row hold the verb and noun
            # class ids -- presumably the EPIC_100 csv layout; TODO confirm.
            video_name = self._samples[index][0]
            verb_class = self._samples[index][10]
            noun_class = self._samples[index][12]
            video_path = os.path.join(self.data_root_dir, video_name+".MP4")
        else:
            # if the split is submission, then no label is available
            # we simply set the verb class and the noun class to zero
            video_name = self._samples[index][0]
            verb_class = 0
            noun_class = 0
            video_path = os.path.join(self.data_root_dir, video_name+".MP4")
        if self.cfg.DATA.MULTI_LABEL or not hasattr(self.cfg.DATA, "TRAIN_VERSION"):
            # Joint verb+noun supervision.
            supervised_label = {
                "verb_class": verb_class,
                "noun_class": noun_class
            }
        else:
            # NOTE(review): supervised_label stays unbound if TRAIN_VERSION is
            # neither recognized value -- a NameError would surface below.
            if self.cfg.DATA.TRAIN_VERSION == "only_train_verb":
                supervised_label = verb_class
            elif self.cfg.DATA.TRAIN_VERSION == "only_train_noun":
                supervised_label = noun_class
        sample_info = {
            "name": video_name,
            "path": video_path,
            "supervised_label": supervised_label
        }
        return sample_info
    def _config_transform(self):
        """
        Configs the transform for the dataset.
        For train, we apply random cropping, random horizontal flip, random color jitter (optionally),
        normalization and random erasing (optionally).
        For val and test, we apply controlled spatial cropping and normalization.
        The transformations are stored as a callable function to "self.transforms".
        """
        self.transform = None
        if self.split == 'train' and not self.cfg.PRETRAIN.ENABLE:
            std_transform_list = [
                transforms.ToTensorVideo(),
                KineticsResizedCrop(
                    short_side_range = [self.cfg.DATA.TRAIN_JITTER_SCALES[0], self.cfg.DATA.TRAIN_JITTER_SCALES[1]],
                    crop_size = self.cfg.DATA.TRAIN_CROP_SIZE,
                ),
                transforms.RandomHorizontalFlipVideo()
            ]
            # Add color aug
            if self.cfg.AUGMENTATION.COLOR_AUG:
                std_transform_list.append(
                    ColorJitter(
                        brightness=self.cfg.AUGMENTATION.BRIGHTNESS,
                        contrast=self.cfg.AUGMENTATION.CONTRAST,
                        saturation=self.cfg.AUGMENTATION.SATURATION,
                        hue=self.cfg.AUGMENTATION.HUE,
                        grayscale=self.cfg.AUGMENTATION.GRAYSCALE,
                        consistent=self.cfg.AUGMENTATION.CONSISTENT,
                        shuffle=self.cfg.AUGMENTATION.SHUFFLE,
                        gray_first=self.cfg.AUGMENTATION.GRAY_FIRST,
                    ),
                )
            # Normalization always runs last, followed by optional erasing.
            std_transform_list += [
                transforms.NormalizeVideo(
                    mean=self.cfg.DATA.MEAN,
                    std=self.cfg.DATA.STD,
                    inplace=True
                ),
                RandomErasing(self.cfg)
            ]
            self.transform = Compose(std_transform_list)
        elif self.split == 'val' or self.split == 'test' or self.split == "submission":
            # Deterministic resize/crop; kept on self so the spatial index can
            # be set per sample in _pre_transformation_config.
            self.resize_video = KineticsResizedCrop(
                    short_side_range = [self.cfg.DATA.TEST_SCALE, self.cfg.DATA.TEST_SCALE],
                    crop_size = self.cfg.DATA.TEST_CROP_SIZE,
                    num_spatial_crops = self.cfg.TEST.NUM_SPATIAL_CROPS
                )
            std_transform_list = [
                transforms.ToTensorVideo(),
                self.resize_video,
                transforms.NormalizeVideo(
                    mean=self.cfg.DATA.MEAN,
                    std=self.cfg.DATA.STD,
                    inplace=True
                )
            ]
            self.transform = Compose(std_transform_list)
    def _pre_transformation_config(self):
        """
        Set transformation parameters if required.
        """
        # Select which of the evaluation spatial crops this view produces.
        self.resize_video.set_spatial_index(self.spatial_idx)
| #!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
""" Epic-Kitchens dataset. """
import os
import random
import torch
import torch.utils.data
import utils.logging as logging
import time
import oss2 as oss
from torchvision.transforms import Compose
import torchvision.transforms._transforms_video as transforms
import torch.nn.functional as F
from datasets.utils.transformations import (
ColorJitter,
KineticsResizedCrop
)
from datasets.base.base_dataset import BaseVideoDataset
from datasets.utils.random_erasing import RandomErasing
import utils.bucket as bu
from datasets.base.builder import DATASET_REGISTRY
logger = logging.get_logger(__name__)
@DATASET_REGISTRY.register()
class Epickitchen100(BaseVideoDataset):
    """EPIC-KITCHENS-100 video dataset with verb/noun action labels."""
    def __init__(self, cfg, split):
        """Initialize the dataset for the given config and split name."""
        super(Epickitchen100, self).__init__(cfg, split)
        # Evaluation (test/submission) outside of pretraining needs per-sample
        # spatial-crop configuration; see _pre_transformation_config below.
        if (self.split == "test" or self.split == "submission") and self.cfg.PRETRAIN.ENABLE == False:
            self._pre_transformation_config_required = True
    def _get_dataset_list_name(self):
        """
        Returns the annotation list file name for the current split.
        Returns:
            dataset_list_name (str)
        """
        if self.split == "train":
            if self.cfg.TRAIN.TRAIN_VAL_COMBINE:
                train_list = "train_val"
            else:
                train_list = "train"
        # NOTE(review): both "val" and "test" splits resolve to the
        # "validation" list here (labelled test annotations are withheld);
        # "submission" uses the unlabelled test-timestamps file. Confirm
        # this is intentional for the "test" split.
        name = "EPIC_100_{}.csv".format(
            train_list if self.split == "train" else "validation" if not self.split == "submission" else "test_timestamps",
        )
        logger.info("Reading video list from file: {}".format(name))
        return name
    def _get_sample_info(self, index):
        """
        Returns the sample info corresponding to the index.
        Args:
            index (int): target index
        Returns:
            sample_info (dict): contains different information to be used later
                "name": the name of the video
                "path": the path of the video for the specified index
                "supervised_label": verb/noun label(s) of the video
        """
        if not self.split == "submission":
            # Columns 10 and 12 of the annotation row hold the verb and noun
            # class ids -- presumably the EPIC_100 csv layout; TODO confirm.
            video_name = self._samples[index][0]
            verb_class = self._samples[index][10]
            noun_class = self._samples[index][12]
            video_path = os.path.join(self.data_root_dir, video_name+".MP4")
        else:
            # if the split is submission, then no label is available
            # we simply set the verb class and the noun class to zero
            video_name = self._samples[index][0]
            verb_class = 0
            noun_class = 0
            video_path = os.path.join(self.data_root_dir, video_name+".MP4")
        if self.cfg.DATA.MULTI_LABEL or not hasattr(self.cfg.DATA, "TRAIN_VERSION"):
            # Joint verb+noun supervision.
            supervised_label = {
                "verb_class": verb_class,
                "noun_class": noun_class
            }
        else:
            # NOTE(review): supervised_label stays unbound if TRAIN_VERSION is
            # neither recognized value -- a NameError would surface below.
            if self.cfg.DATA.TRAIN_VERSION == "only_train_verb":
                supervised_label = verb_class
            elif self.cfg.DATA.TRAIN_VERSION == "only_train_noun":
                supervised_label = noun_class
        sample_info = {
            "name": video_name,
            "path": video_path,
            "supervised_label": supervised_label
        }
        return sample_info
    def _config_transform(self):
        """
        Configs the transform for the dataset.
        For train, we apply random cropping, random horizontal flip, random color jitter (optionally),
        normalization and random erasing (optionally).
        For val and test, we apply controlled spatial cropping and normalization.
        The transformations are stored as a callable function to "self.transforms".
        """
        self.transform = None
        if self.split == 'train' and not self.cfg.PRETRAIN.ENABLE:
            std_transform_list = [
                transforms.ToTensorVideo(),
                KineticsResizedCrop(
                    short_side_range = [self.cfg.DATA.TRAIN_JITTER_SCALES[0], self.cfg.DATA.TRAIN_JITTER_SCALES[1]],
                    crop_size = self.cfg.DATA.TRAIN_CROP_SIZE,
                ),
                transforms.RandomHorizontalFlipVideo()
            ]
            # Add color aug
            if self.cfg.AUGMENTATION.COLOR_AUG:
                std_transform_list.append(
                    ColorJitter(
                        brightness=self.cfg.AUGMENTATION.BRIGHTNESS,
                        contrast=self.cfg.AUGMENTATION.CONTRAST,
                        saturation=self.cfg.AUGMENTATION.SATURATION,
                        hue=self.cfg.AUGMENTATION.HUE,
                        grayscale=self.cfg.AUGMENTATION.GRAYSCALE,
                        consistent=self.cfg.AUGMENTATION.CONSISTENT,
                        shuffle=self.cfg.AUGMENTATION.SHUFFLE,
                        gray_first=self.cfg.AUGMENTATION.GRAY_FIRST,
                    ),
                )
            # Normalization always runs last, followed by optional erasing.
            std_transform_list += [
                transforms.NormalizeVideo(
                    mean=self.cfg.DATA.MEAN,
                    std=self.cfg.DATA.STD,
                    inplace=True
                ),
                RandomErasing(self.cfg)
            ]
            self.transform = Compose(std_transform_list)
        elif self.split == 'val' or self.split == 'test' or self.split == "submission":
            # Deterministic resize/crop; kept on self so the spatial index can
            # be set per sample in _pre_transformation_config.
            self.resize_video = KineticsResizedCrop(
                    short_side_range = [self.cfg.DATA.TEST_SCALE, self.cfg.DATA.TEST_SCALE],
                    crop_size = self.cfg.DATA.TEST_CROP_SIZE,
                    num_spatial_crops = self.cfg.TEST.NUM_SPATIAL_CROPS
                )
            std_transform_list = [
                transforms.ToTensorVideo(),
                self.resize_video,
                transforms.NormalizeVideo(
                    mean=self.cfg.DATA.MEAN,
                    std=self.cfg.DATA.STD,
                    inplace=True
                )
            ]
            self.transform = Compose(std_transform_list)
    def _pre_transformation_config(self):
        """
        Set transformation parameters if required.
        """
        # Select which of the evaluation spatial crops this view produces.
        self.resize_video.set_spatial_index(self.spatial_idx)
| en | 000578429_jiangzeyinzi-EssentialMC2_epickitchen100_f4bf8df56d7a.py | unknown | 1,698 |
#!/usr/bin/python
import argparse
import sys
from ldif import LDIFParser, LDIFWriter
class ActiveDirectoryToOpenLdapLDIFConvertor(LDIFParser):
    """Streams an Active Directory LDIF export, rewriting objectClasses to an
    OpenLDAP-friendly schema while copying each entry to the output."""

    # objectClass values appended whenever the DN contains the key fragment.
    objectclassAddsBasedOnDN = {
        'CN=ExchangeActiveSyncDevices': 'exchangeActiveSyncDevices',
    }
    # Per-DN-fragment objectClass renames (old -> new), matched case-insensitively.
    objectclassChangesBasedOnDN = {
        'CN=_Template ': {'user': 'customActiveDirectoryUserTemplate'},
        'CN=_Template_': {'user': 'customActiveDirectoryUserTemplate'},
        'CN=_Template\, ': {'user': 'customActiveDirectoryUserTemplate'},
    }
    # Unconditional objectClass renames applied to every entry.
    objectclassMappings = {
        'top': 'mstop',
        'user': 'customActiveDirectoryUser',
        'group': 'customActiveDirectoryGroup',
        'contact': 'customActiveDirectoryContact',
    }
    # Attribute types whose duplicate values get collapsed.
    attributetypesValuesDuplicates = ['dSCorePropagationData']

    def __init__(self, input, output):
        LDIFParser.__init__(self, input)
        self.writer = LDIFWriter(output)

    def addObjectclassesBasedOnDN(self, dn, entry):
        """Append extra objectClasses when the DN matches a known fragment."""
        lowered_dn = dn.lower()
        for fragment, extra in self.objectclassAddsBasedOnDN.items():
            if fragment.lower() in lowered_dn:  # case-insensitive DN match
                entry.setdefault('objectClass', []).append(extra)

    def changeObjectclassesBasedOnDN(self, dn, entry):
        """Rename objectClasses, but only for entries whose DN matches."""
        if 'objectClass' not in entry:
            return
        classes = entry['objectClass']
        lowered_dn = dn.lower()
        for fragment, renames in self.objectclassChangesBasedOnDN.items():
            if fragment.lower() not in lowered_dn:
                continue
            for source, target in renames.items():
                for position, current in enumerate(classes):
                    if current == source:
                        classes[position] = target

    def changeObjectclasses(self, dn, entry):
        """Apply the unconditional objectClass renames to the entry."""
        if 'objectClass' in entry:
            classes = entry['objectClass']
            for position, current in enumerate(classes):
                replacement = self.objectclassMappings.get(current)
                if replacement is not None:
                    classes[position] = replacement

    def removeDuplicateAttributeValues(self, dn, entry):
        """Collapse duplicate values for the configured attribute types."""
        for attributetype in self.attributetypesValuesDuplicates:
            if attributetype in entry:
                # NOTE: set() does not preserve the original value order.
                entry[attributetype] = list(set(entry[attributetype]))

    def handle(self, dn, entry):
        """LDIFParser callback: transform one entry and write it out."""
        self.addObjectclassesBasedOnDN(dn, entry)
        self.changeObjectclassesBasedOnDN(dn, entry)
        self.changeObjectclasses(dn, entry)
        self.removeDuplicateAttributeValues(dn, entry)
        self.writer.unparse(dn, entry)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='',
    )
    parser.add_argument('--src', metavar='SOURCE', help='Source ldif')
    parser.add_argument('--dst', metavar='DESTINATION', help='Destination ldif')
    args = parser.parse_args()
    # Fix: open both files in context managers so they are closed (and the
    # output flushed) even if parsing fails; the original leaked both handles.
    with open(args.src, 'rb') as src, open(args.dst, 'wb') as dst:
        adparser = ActiveDirectoryToOpenLdapLDIFConvertor(src, dst)
        adparser.parse()
| #!/usr/bin/python
import argparse
import sys
from ldif import LDIFParser, LDIFWriter
class ActiveDirectoryToOpenLdapLDIFConvertor(LDIFParser):
    """Streams an Active Directory LDIF export, rewriting objectClasses to an
    OpenLDAP-friendly schema while copying each entry to the output."""
    # objectClass values appended whenever the DN contains the key fragment.
    objectclassAddsBasedOnDN = { 'CN=ExchangeActiveSyncDevices' : 'exchangeActiveSyncDevices'
    }
    # Per-DN-fragment objectClass renames (old -> new), matched case-insensitively.
    objectclassChangesBasedOnDN = { 'CN=_Template ': { 'user': 'customActiveDirectoryUserTemplate' },
                                    'CN=_Template_': { 'user': 'customActiveDirectoryUserTemplate' },
                                    'CN=_Template\, ': { 'user': 'customActiveDirectoryUserTemplate' }
    }
    # Unconditional objectClass renames applied to every entry.
    objectclassMappings = { 'top' : 'mstop', 'user' : 'customActiveDirectoryUser', 'group' : 'customActiveDirectoryGroup',
                            'contact' : 'customActiveDirectoryContact' }
    # Attribute types whose duplicate values get collapsed.
    attributetypesValuesDuplicates = [ 'dSCorePropagationData' ]
    def __init__(self, input, output):
        LDIFParser.__init__(self, input)
        self.writer = LDIFWriter(output)
    def addObjectclassesBasedOnDN(self, dn, entry):
        """Append extra objectClasses when the DN matches a known fragment."""
        for objAdd in self.objectclassAddsBasedOnDN:
            if objAdd.lower() in dn.lower(): # case insensitive match
                if 'objectClass' not in entry.keys():
                    entry['objectClass'] = [ ]
                entry['objectClass'].append(self.objectclassAddsBasedOnDN[objAdd]);
    def changeObjectclassesBasedOnDN(self, dn, entry):
        """Rename objectClasses, but only for entries whose DN matches."""
        if 'objectClass' not in entry.keys():
            return
        for objChange in self.objectclassChangesBasedOnDN:
            if objChange.lower() in dn.lower(): # case insensitive match
                for objSource in self.objectclassChangesBasedOnDN[objChange]:
                    index = 0
                    for objTarget in entry['objectClass']:
                        if objSource == objTarget:
                            entry['objectClass'][index] = self.objectclassChangesBasedOnDN[objChange][objSource]
                        index += 1
    def changeObjectclasses(self, dn, entry):
        """Apply the unconditional objectClass renames to the entry."""
        if 'objectClass' in entry.keys():
            index = 0
            for objectclass in entry['objectClass']:
                for objMap in self.objectclassMappings:
                    if objMap == objectclass:
                        entry['objectClass'][index] = self.objectclassMappings[objMap]
                index += 1
    def removeDuplicateAttributeValues(self, dn, entry):
        """Collapse duplicate values for the configured attribute types."""
        for attributetype in self.attributetypesValuesDuplicates:
            if attributetype in entry.keys():
                # NOTE: set() does not preserve the original value order.
                entry[attributetype] = list(set(entry[attributetype]))
    def handle(self, dn, entry):
        """LDIFParser callback: transform one entry and write it out."""
        self.addObjectclassesBasedOnDN(dn, entry)
        self.changeObjectclassesBasedOnDN(dn, entry)
        self.changeObjectclasses(dn, entry)
        self.removeDuplicateAttributeValues(dn, entry)
        self.writer.unparse(dn, entry)
if __name__ == '__main__':
    # CLI entry point: convert an AD-exported LDIF into an OpenLDAP one.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='',
    )
    parser.add_argument('--src', metavar='SOURCE', help='Source ldif')
    parser.add_argument('--dst', metavar='DESTINATION', help='Destination ldif')
    args = parser.parse_args()
    # NOTE(review): both file handles are left open; closing/flushing relies
    # on process exit -- consider context managers.
    adparser = ActiveDirectoryToOpenLdapLDIFConvertor(open(args.src, 'rb'), open(args.dst, 'wb'))
    adparser.parse()
| en | 000318413_mvrck0-active-directory-devcontainer_ldif-convertor_952e78ada320.py | unknown | 856 |
import os.path
from absl import logging
from icubam.www.handlers import base
class DisclaimerHandler(base.BaseHandler):
    """Unauthenticated handler that serves an optional disclaimer page."""
    ROUTE = '/disclaimer'

    def initialize(self, config, db_factory):
        # No handler-specific setup; defer entirely to the base class.
        super().initialize(config, db_factory)

    def get_disclaimer_html(self):
        """Return the configured disclaimer file's contents, or "" if missing."""
        path = self.config.server.disclaimer
        if not os.path.exists(path):
            logging.warning(
                f"Disclaimer file from config {path} is set but not available"
            )
            return ""
        with open(path, 'r') as fp:
            return fp.read()

    def get_current_user(self):
        """This page is public: never report an authenticated user."""
        return None

    async def get(self):
        """Render the disclaimer page when one is configured."""
        # config.server is the app's config object (has_key is its API,
        # not a plain dict).
        if self.config.server.has_key('disclaimer'):
            self.render('disclaimer.html', disclaimer=self.get_disclaimer_html())
| import os.path
from absl import logging
from icubam.www.handlers import base
class DisclaimerHandler(base.BaseHandler):
    """Unauthenticated handler that serves an optional disclaimer page."""
    ROUTE = '/disclaimer'
    def initialize(self, config, db_factory):
        # No handler-specific setup; defer entirely to the base class.
        super().initialize(config, db_factory)
    def get_disclaimer_html(self):
        """Return the configured disclaimer file's contents, or "" if missing."""
        path = self.config.server.disclaimer
        if os.path.exists(path):
            with open(path, 'r') as fp:
                return fp.read()
        else:
            logging.warning(
                f"Disclaimer file from config {path} is set but not available"
            )
            return ""
    def get_current_user(self):
        """This route is not secured: never report an authenticated user."""
        return None
    async def get(self):
        """Render the disclaimer page when one is configured."""
        # NOTE(review): config.server.has_key is the config object's API (not a
        # dict); if the key is absent no response body is written at all --
        # confirm the framework finishes the request cleanly.
        if self.config.server.has_key('disclaimer'):
            html = self.get_disclaimer_html()
            data = {'disclaimer': html}
            self.render('disclaimer.html', **data)
| en | 000354583_rth-icubam_disclaimer_7c31f19828a0.py | unknown | 275 |
Language Decoded | Multilingual Code Dataset
Multilingual Python code datasets for the Language Decoded project (part of Cohere's Tiny Aya Expedition), investigating whether code's reasoning benefit for language models is language-dependent or structure-dependent.
Research Question
Does fine-tuning on non-English code (Python with translated keywords) improve multilingual reasoning as much as English code does?
Prior work (Aryabumi et al., 2024 -- "To Code or Not to Code") demonstrated that including English code in pre-training data improves downstream reasoning performance by approximately 8%. However, that study only tested English code. This dataset enables the natural follow-up: does the reasoning benefit come from the structure of code, or from the language of its keywords?
Dataset Description
This dataset provides filtered, quality-controlled Python source code in four configurations: the original English and three keyword-swapped variants (Chinese, Spanish, Urdu). The source data is drawn from bigcode/the-stack-dedup (Python subset), filtered for quality using the following criteria:
- AST-valid Python only (must parse without errors)
- Permissive licenses only (MIT, Apache-2.0, BSD, etc.)
- 10--1000 lines of code
- Minimum 21 GitHub stars
- No autogenerated files
- SHA-256 deduplication
Keyword-swapped variants are produced using Legesher v0.7.3, which translates Python reserved words (37 keywords, 72 builtins, 66 exceptions) into the target language while preserving code structure and semantics.
Available Configs
| Config | Condition | Language | Description |
|---|---|---|---|
| `condition-1-en` | Condition 1 (control) | English | Unmodified filtered Python from The Stack Dedup |
| `condition-2-ur` | Condition 2 | Urdu | Keyword-swapped Python -- 37 keywords, 72 builtins, 66 exceptions translated via Legesher v0.7.3 |
| `condition-2-zh` | Condition 2 | Chinese | Keyword-swapped Python -- same transpilation method |
| `condition-2-es` | Condition 2 | Spanish | Keyword-swapped Python -- same transpilation method |
Schema
| Column | Type | Description |
|---|---|---|
| `code` | string | Python source code. For condition-2 configs, this is the transpiled (keyword-swapped) version. For condition-1, this is the original English source. |
| `code_en` | string | Original English Python source code. Identical to `code` for condition-1-en. |
| `language` | string | ISO 639-1 language code: `en`, `ur`, `zh`, or `es`. |
| `file_path` | string | Original file path in The Stack Dedup. |
| `license` | string | SPDX license identifier for the source file. |
| `token_count` | int64 | Token count computed using the CohereLabs/tiny-aya-base tokenizer. |
Experimental Conditions
The Language Decoded experiment uses a ladder of six conditions to isolate the mechanism behind code's reasoning benefit. This dataset currently provides data for conditions 1 and 2:
| Condition | Name | Purpose |
|---|---|---|
| Baseline | No fine-tuning | Establishes the performance floor |
| Condition 1 | English code | Tests whether code fine-tuning helps at all (replicates Aryabumi et al.) |
| Condition 2 | Keyword-swapped code | Tests whether the language of keywords matters for the reasoning benefit |
| Conditions 3--6 | (planned) | Additional controls not yet included in this dataset |
Usage
from datasets import load_dataset
# Load English code (control)
ds = load_dataset("legesher/language-decoded-data", "condition-1-en")
# Load a keyword-swapped variant
ds = load_dataset("legesher/language-decoded-data", "condition-2-ur")
ds = load_dataset("legesher/language-decoded-data", "condition-2-zh")
ds = load_dataset("legesher/language-decoded-data", "condition-2-es")
# Access splits
train = ds["train"]
val = ds["validation"]
Technical Details
| Parameter | Value |
|---|---|
| Source dataset | bigcode/the-stack-dedup (Python subset) |
| Transpilation tool | Legesher v0.7.3 (legesher-core, legesher-i18n) |
| Tokenizer | CohereLabs/tiny-aya-base |
| Base model | CohereLabs/tiny-aya-base (3.35B params) |
| Train/validation split | 90% / 10% (seed 42) |
| File format | Parquet (snappy compression) |
| Filtering criteria | AST-valid, permissive licenses, 10--1000 lines, min 21 GitHub stars, no autogenerated files, SHA-256 deduplication |
Citation
@misc{language-decoded-2026,
title={Language Decoded: Investigating Language-Dependent vs. Structure-Dependent Reasoning Benefits of Code},
author={Madison Edgar and Saad Bazaz and Rafay Mustafa and Sarah Jawaid and Rashik Shahjahan and Khojasteh Mirza and Sohaib Bazaz},
year={2026},
publisher={Hugging Face},
url={https://huggingface.co/datasets/legesher/language-decoded-data}
}
Links
License
Apache 2.0
- Downloads last month
- 19