# --- dataset header (not source code) ---
# code: stringlengths 3..1.05M | repo_name: stringlengths 5..104 | path: stringlengths 4..251
# language: stringclasses 1 | license: stringclasses 15 | size: int64 3..1.05M
from osuapi import OsuApi, AHConnector
from discord.ext import commands
from .utils import utils
import datetime
import discord
import logging
import aiohttp
import os
log = logging.getLogger(__name__)
class Core(commands.Cog):
    """
    The core of Nurevam, just essentials.

    Provides bot meta commands (uptime, prefix, info, serverinfo),
    a per-guild plugin status overview, and a feedback/PM relay
    between users and the bot owner.
    """
    def __init__(self,bot):
        self.bot = bot
        # Shared redis handle exposed by the bot's db layer.
        self.redis=bot.db.redis
        # NOTE(review): aliases bot.say as say_edit -- presumably kept for
        # backwards compatibility with older cogs; confirm before removing.
        self.bot.say_edit = bot.say
    def get_bot_uptime(self): # calculates how long the bot has been up
        """Return a human-readable string of how long the bot has run."""
        now = datetime.datetime.utcnow()
        delta = now - self.bot.uptime
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        # Only mention days when there is at least one.
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)
    def get_time_delta(self,person):
        """Return a human-readable delta between utcnow() and *person*.

        *person* is a naive UTC datetime (e.g. member.created_at).
        Formatting logic intentionally mirrors get_bot_uptime().
        """
        delta = datetime.datetime.utcnow() - person
        hours, remainder = divmod(int(delta.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        days, hours = divmod(hours, 24)
        if days:
            fmt = '{d} days, {h} hours, {m} minutes, and {s} seconds'
        else:
            fmt = '{h} hours, {m} minutes, and {s} seconds'
        return fmt.format(d=days, h=hours, m=minutes, s=seconds)
    @commands.command()
    async def uptime(self,ctx): # shows total time the bot has been running
        """Prints the uptime."""
        await self.bot.say(ctx,content = "```py\nI have been up for {}\n```".format(self.get_bot_uptime()))
    @commands.command()
    async def prefix(self,ctx):
        """Show the command prefixes configured for this guild."""
        prefix = (await self.redis.get("{}:Config:CMD_Prefix".format(ctx.message.guild.id)))
        # NOTE(review): set() of a *string* yields individual characters, so a
        # multi-character prefix would be split apart here; it also raises
        # TypeError if redis returns None -- confirm intended behaviour.
        prefix = set(prefix + ctx.prefix) # if user didn't set any, default is "!"; otherwise add the currently used prefix to the stored ones
        await self.bot.say(ctx,content = "```\n{}\n```".format(",".join(prefix)))
    @commands.command()
    async def info(self,ctx,*,person:discord.Member = None):
        """
        About Nurevam, or about a mentioned member.
        """
        if not person:
            # Bot-wide summary: guild count, unique member count, owner.
            guild = len(self.bot.guilds)
            member = len(set(self.bot.get_all_members()))
            app = await self.bot.application_info()
            msg = "Name:{}".format(self.bot.user)
            if ctx.message.guild.me.nick:
                msg += "\nNickname:{}".format(ctx.message.guild.me.nick)
            msg += "\nCreator: {}".format(app.owner)
            msg += "\nServer:{}\nMembers:{}".format(guild,member)
            link = "If you want to invite this bot to your server, you can check it out here <http://nurevam.site>!"
            return await self.bot.say(ctx,content = "```xl\n{}\n```\n{}\n".format(msg,link))
        else:
            # Per-member card: id, avatar, account age, join age, role count.
            e = discord.Embed()
            e.title = "{} - {}".format(person,person.id)
            e.set_thumbnail(url = person.avatar_url)
            e.add_field(name = "Created at", value="{} - ({})".format(person.created_at,self.get_time_delta(person.created_at)),inline=False)
            e.add_field(name = "Joined at", value="{} - ({})".format(person.joined_at,self.get_time_delta(person.joined_at)),inline=False)
            e.add_field(name = "Total Roles", value=str(len(person.roles)),inline=False)
            # Mirror the member's role colour when they have one (0 = default).
            if person.colour.value:
                e.colour = person.color
            await self.bot.say(ctx,embed = e)
    @commands.command()
    async def serverinfo(self,ctx):
        """
        Give info about this server.
        """
        g = ctx.guild
        embed = discord.Embed()
        embed.set_thumbnail(url = g.icon_url)
        embed.title = "{} - {}".format(g.name,g.id)
        embed.add_field(name = "Owner",value="{} - {}".format(g.owner,g.owner.id),inline=False)
        embed.add_field(name = "Created at", value = str(g.created_at), inline=False)
        embed.add_field(name = "Total Roles", value= str(len(g.roles)), inline=False)
        embed.add_field(name = "Total Members", value= str(g.member_count), inline=False)
        embed.add_field(name = "Premium Member", value= str(g.premium_subscription_count), inline=False)
        embed.add_field(name = "Premium Tier", value= str(g.premium_tier), inline=False)
        await self.bot.say(ctx,embed = embed)
    @commands.command(hidden=True)
    async def command(self,ctx):
        """
        Type !help {command} for more info on a command.
        You can also type !help {category} for more info on a category.
        For example, !help level (If you have level plugin enable!)
        """
        # Placeholder target so "!help command" has something to describe.
        await ctx.send("Yes this is a command.")
    @commands.command(hidden=True)
    async def category(self,ctx):
        """
        Type !help command for additional info on a command.
        You can also type !help category for additional info on a category.
        For example, type !help Level (If you have the level plugin enable!)
        """
        # Placeholder target so "!help category" has something to describe.
        await ctx.send("Yes this is a category.")
    @commands.command(brief = "Showing which plugin is enable")
    async def plugin(self,ctx):
        """
        Red = Disable
        Blue = Enable
        Any problem such as plugins on dashboard is enable but show disable here, info Owner
        """
        # Maps cog display names to the redis keys they are stored under.
        special_case = {"Anime":"myanimelist","Anti Raid":"antiraid"}
        plugin_setting = await self.redis.hgetall("{}:Config:Cogs".format(ctx.message.guild.id))
        embed = discord.Embed()
        cogs = self.bot.cogs.keys()
        for x in cogs:
            setting = u"\U0001F534" # red = disabled
            if x in ("Core", "Remindme", "Tools", "REPL","Events"): # owner-only cogs
                # Hide owner-only cogs from everyone but the owner.
                if ctx.message.author.id != self.bot.owner.id:
                    continue
                setting = u"\U0001F535" # blue = enabled
            if x.lower() in plugin_setting or special_case.get(x) in plugin_setting:
                setting = u"\U0001F535" # blue = enabled
            embed.add_field(name = x,value = setting)
        # Mirror the bot's own role colour when it has one.
        if ctx.message.guild.me.colour.value:
            embed.colour = ctx.message.guild.me.colour
        embed.set_footer(text = "{} = Disable | {} = Enable".format(u"\U0001F534",u"\U0001F535"))
        await ctx.send(embed=embed)
    @commands.command()
    @commands.cooldown(rate = 1,per=300,type = commands.BucketType.user)
    async def feedback(self,ctx,*,msg):
        """
        Gives any feedback about bot. Cooldown: 5 min
        For example, reporting bot, new idea/suggestions.
        A quicker way to get hold of owner without joining server.
        Sooner or later, bot may(not) contact you via PMS about status of your requests.
        Only able to make feedback once a five minute.
        """
        embed = discord.Embed()
        embed.set_author(name = ctx.message.author,icon_url=ctx.message.author.avatar_url or ctx.message.author.default_avatar_url)
        embed.add_field(name = "Author",value = "**ID**:{0.id}".format(ctx.message))
        embed.add_field(name = "Server",value = "**Name**:{0.guild.name}\n**ID**:{0.guild.id}\n**Channel**:{0.channel.name} - {0.channel.id}".format(ctx.message))
        embed.add_field(name = "Feedback",value = msg)
        # Hard-coded feedback channel on the owner's guild.
        channel = self.bot.get_channel(292133726370922497)
        await channel.send(embed=embed)
        await ctx.send(u"\U0001F44C"+", Thank you for your valuable feedback. \nHopefully, the owner will reply to you soon.")
    @commands.command(hidden=True)
    @commands.check(utils.is_owner)
    async def pm(self,ctx,user_id:int,*,msg):
        """Owner-only: DM *msg* to the user with the given id."""
        user = self.bot.get_user(user_id)
        print(user)
        print(msg)
        if user is None:
            return await ctx.send("User wasn't found.")
        message = "I have got a message from the owner,{}\n```fix\n{}\n```" \
                  "\n\nPlease note that the owner will not able to see any message of this before or after.\n" \
                  "To reply back, please use !reply <message>".format(self.bot.owner,msg)
        await user.send(message)
        # Thumbs-up acknowledgement back to the owner.
        await ctx.send(u"\U0001F44C")
    @commands.command(hidden = True)
    async def reply(self,ctx,*,msg):
        """Relay a user's reply back to the owner's hard-coded reply channel."""
        channel = self.bot.get_channel(295075318430040065)
        if channel is None:
            return await ctx.send("Appear so, reply system is down...")
        embed = discord.Embed()
        embed.set_author(name = ctx.message.author,icon_url=ctx.message.author.avatar_url)
        embed.add_field(name = "Author",value = "**ID**:{0.author.id}".format(ctx.message))
        embed.add_field(name = "Reply",value = msg,inline=False)
        await channel.send(embed=embed)
        await ctx.send(u"\U0001F44C")
def setup(bot):
    """Standard discord.py extension entry point: register the Core cog."""
    bot.add_cog(Core(bot))
# source: Maverun/Nurevam | Bot/cogs/core.py | Python | mit | 8,761 bytes
from django.db import models
from Counsellee.models import CounselleeDetails
from Counsellor.models import CounsellorDetails
from django.contrib.auth.models import User
from WebServer.settings import MEDIA_ROOT
class SessionDetails(models.Model):
    """A counselling session linking a counsellee to a counsellor.

    Tracks scheduling (sessionDate), completion state, the topic
    (problem/description) and files exchanged by either party.
    """
    sessionID = models.AutoField(primary_key=True)
    # Deleting either participant cascades and removes their sessions.
    counselleeID = models.ForeignKey(CounselleeDetails, on_delete=models.CASCADE)
    counsellorID = models.ForeignKey(CounsellorDetails, on_delete=models.CASCADE)
    isCompleted = models.BooleanField(default=False)
    sessionDate = models.DateField()
    problem = models.CharField(max_length=100)
    description = models.TextField()
    # NOTE(review): upload_to is normally a path *relative* to MEDIA_ROOT;
    # passing MEDIA_ROOT itself looks suspicious -- confirm upload location.
    reports = models.FileField(upload_to=MEDIA_ROOT)
    counselleefiles = models.FileField(null=True, upload_to=MEDIA_ROOT)

    def __str__(self):
        return "session ID: " + str(self.sessionID) + " Counsellee ID: " + str(self.counselleeID) + " Counsellor ID: " + str(self.counsellorID)
"""
WSGI config for patchwatcher2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project's settings module unless the environment
# already selected one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "patchwatcher2.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
# source: LuyaoHuang/patchwatcher | patchwatcher2/patchwatcher2/wsgi.py | Python | lgpl-3.0 | 403 bytes
# -*- coding: utf-8 -*-
import sys
import scrapy
import time
import re
from datetime import datetime
from scrapy.http import Request, FormRequest, TextResponse
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy import log
from ..items import CgmhItem
#from scrapy.stats import Stats
class CgmhkhMED(scrapy.Spider):
    """Scrapes the CGMH Kaohsiung internal-medicine registration timetable.

    Python 2 spider: walks the timetable rows, splitting cells that list
    multiple doctors, and yields one CgmhItem per doctor/slot.
    """
    name = "cgmhkhMED"
    allowed_domains = ["org.tw"]
    start_urls = [
        "https://www.cgmh.org.tw/register/RMSTimeTable.aspx?dpt=81100A81A00A83680A"
    ]
    def parse(self, response):
        # Re-request the same URL so the actual parsing runs in parse_table.
        request = Request(response.url, callback = self.parse_table)
        yield request
    def parse_table(self, response):
        """Build the list of CgmhItem objects from the timetable HTML."""
        items = []
        sel = Selector(response)
        #print sel.extract()
        tables = sel.xpath('//table[@class="tableStyle"]/tr')
        print 'len of table = '+ str(len(tables))
        for t in range(len(tables)-1):
            # Cells of this row; cell 0 is the date, cells 1-3 the sessions.
            table = tables[t+1].xpath('.//td')
            #print "list: " + str(t) + "," + table[0].extract()
            for column in range(3):
                #print table[column+1].extract()
                br = table[column+1].extract().split('<br>')
                # When one cell lists several doctors they are separated
                # by <br>; the last fragment after split is empty/trailing.
                for b in range(len(br)-1):
                    item = CgmhItem()
                    n = Selector(text = br[b])
                    nameFull = n.xpath('.//span/text()').extract()
                    if (nameFull != []):
                        # Strip leading queue/room digits from the name.
                        name = re.sub('\d+', '', nameFull[0])
                        item['name'] = name.strip()
                        status = n.xpath('.//font/text()')[0].extract()
                        if (status == u'(額滿)'):
                            # Fully booked: keep link when one exists.
                            item['full'] = '名額已滿'
                            try:
                                item['link'] = 'https://www.cgmh.org.tw/register/' + n.xpath('.//a/@href')[0].extract()
                            except Exception as e:
                                pass
                        elif (status == u'(停診)'):
                            # Session cancelled: skip this slot entirely.
                            continue
                    else:
                        # No <span>: the doctor name lives in the <a> tag
                        # and the slot is open for registration.
                        name = re.sub('\d+', '', n.xpath('.//a/text()')[0].extract())
                        item['name'] = name.strip()
                        item['full'] = '可掛號'
                        item['link'] = 'https://www.cgmh.org.tw/register/' + n.xpath('.//a/@href')[0].extract()
                    if (column==0):
                        item['time'] = 'morning'
                    if (column==1):
                        item['time'] = 'afternoon'
                    if (column==2):
                        item['time'] = 'evening'
                    # Convert the Chinese year/month/day heading to yyyymmdd.
                    date = table[0].xpath('.//text()')[0].extract().encode('utf-8')
                    dateFormat = '%Y年%m月%d日'
                    date = time.strptime(date, dateFormat)
                    item['date'] = time.strftime("%Y%m%d", date)
                    item['crawlTime'] = unicode(datetime.now().strftime("%Y%m%d %H:%M"))
                    item['hospital'] = 'cgmh_kh'
                    item['dept'] = 'MED'
                    item['title'] = sel.xpath('.//span[@id="ctl00_ContentPlaceHolder1_lbDptTitle"]//span/text()')[0].extract()
                    items.append(item)
        return items
# source: bryanyang0528/HospitalCrawler | cgmh_kh/cgmh/spiders/cgmh_med.py | Python | gpl-2.0 | 2,695 bytes
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_profile_oneconnect import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load fixture *name* from the fixtures directory, memoized per path.

    The raw text is parsed as JSON when possible; otherwise the plain
    string is cached and returned unchanged.
    """
    path = os.path.join(fixture_path, name)
    try:
        return fixture_data[path]
    except KeyError:
        pass
    with open(path) as handle:
        payload = handle.read()
    try:
        payload = json.loads(payload)
    except Exception:
        pass
    fixture_data[path] = payload
    return payload
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""
    def test_module_parameters(self):
        # Ansible task arguments should be normalised by ModuleParameters
        # (e.g. a bare parent profile name gains the /Common/ partition).
        args = dict(
            name='foo',
            parent='bar',
            maximum_size=100,
            maximum_age=200,
            maximum_reuse=300,
            idle_timeout_override=20,
            limit_type='strict'
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/bar'
        assert p.maximum_size == 100
        assert p.maximum_age == 200
        assert p.maximum_reuse == 300
        assert p.idle_timeout_override == 20
        assert p.limit_type == 'strict'
    def test_api_parameters(self):
        # A recorded BIG-IP REST payload should map directly onto
        # ApiParameters without transformation.
        args = load_fixture('load_ltm_profile_oneconnect_1.json')
        p = ApiParameters(params=args)
        assert p.name == 'oneconnect'
        assert p.maximum_reuse == 1000
class TestManager(unittest.TestCase):
    """Tests for ModuleManager with all device interaction mocked out."""
    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch version probing and telemetry so no BIG-IP is contacted.
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_profile_oneconnect.tmos_version')
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_profile_oneconnect.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = '14.1.0'
        self.m3 = self.p3.start()
        self.m3.return_value = True
    def tearDown(self):
        # Stop the patches started in setUp.
        self.p2.stop()
        self.p3.stop()
    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            parent='bar',
            maximum_reuse=1000,
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force the "profile does not exist yet,
        # so create it" path through exec_module.
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
        assert results['maximum_reuse'] == 1000
# source: F5Networks/f5-ansible | .../test_bigip_profile_oneconnect.py | Python | gpl-3.0 | 3,565 bytes
#
# Copyright 2016-2019 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gafferpy import gaffer as g
from gafferpy import gaffer_connector
def run(host, verbose=False):
    """Connect to the Gaffer REST service at *host* and run all examples."""
    connector = create_connector(host, verbose)
    return run_with_connector(connector)
def run_with_connector(gc):
    """Run every example operation, in order, against connector *gc*."""
    print()
    print('Running operations')
    print('--------------------------')
    print()
    get_schema(gc)
    get_filter_functions(gc)
    get_class_filter_functions(gc)
    get_element_generators(gc)
    get_object_generators(gc)
    get_operations(gc)
    get_serialised_fields(gc)
    get_store_traits(gc)
    is_operation_supported(gc)
    add_elements(gc)
    get_elements(gc)
    get_adj_seeds(gc)
    get_all_elements(gc)
    get_walks(gc)
    generate_elements(gc)
    generate_domain_objs(gc)
    generate_domain_objects_chain(gc)
    get_element_group_counts(gc)
    get_sub_graph(gc)
    export_to_gaffer_result_cache(gc)
    get_job_details(gc)
    get_all_job_details(gc)
    add_named_operation(gc)
    get_all_named_operations(gc)
    named_operation(gc)
    delete_named_operation(gc)
    add_named_view_summarise(gc)
    add_named_view_date_range(gc)
    get_all_named_views(gc)
    named_view_summarise(gc)
    named_view_date_range(gc)
    named_views(gc)
    delete_named_views(gc)
    sort_elements(gc)
    max_element(gc)
    min_element(gc)
    to_vertices_to_entity_seeds(gc)
    complex_op_chain(gc)
    op_chain_in_json(gc)
def create_connector(host, verbose=False):
    """Build a GafferConnector for the REST service at *host*."""
    return gaffer_connector.GafferConnector(host, verbose)
def get_schema(gc):
    """Fetch and print the graph schema."""
    # Get Schema
    result = gc.execute_get(
        g.GetSchema()
    )
    print('Schema:')
    print(result)
    print()
def get_filter_functions(gc):
    """Fetch and print all filter (predicate) functions the store knows."""
    # Get filter functions
    result = gc.execute_get(
        g.GetFilterFunctions()
    )
    print('Filter Functions:')
    print(result)
    print()
def get_class_filter_functions(gc):
    """Fetch and print the filter functions applicable to one class."""
    # Get class filter functions
    class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
    result = gc.execute_get(
        g.GetClassFilterFunctions(class_name=class_name)
    )
    print('Class Filter Functions (IsMoreThan):')
    print(result)
    print()
def get_element_generators(gc):
    """Fetch and print the available element generator classes."""
    # Get Element generators
    result = gc.execute_get(
        g.GetElementGenerators()
    )
    print('Element generators:')
    print(result)
    print()
def get_object_generators(gc):
    """Fetch and print the available object generator classes."""
    # Get Object generators
    result = gc.execute_get(
        g.GetObjectGenerators()
    )
    print('Object generators:')
    print(result)
    print()
def get_operations(gc):
    """Fetch and print the operations exposed by the REST service."""
    # Get operations
    result = gc.execute_get(
        g.GetOperations()
    )
    print('Operations:')
    print(result)
    print()
def get_serialised_fields(gc):
    """Fetch and print the serialised JSON fields of a given class."""
    # Get serialised fields
    class_name = 'uk.gov.gchq.koryphe.impl.predicate.IsMoreThan'
    result = gc.execute_get(
        g.GetSerialisedFields(class_name=class_name)
    )
    print('Serialised Fields (IsMoreThan):')
    print(result)
    print()
def get_store_traits(gc):
    """Fetch and print the traits supported by the underlying store."""
    # Get Store Traits
    result = gc.execute_get(
        g.GetStoreTraits()
    )
    print('Store Traits:')
    print(result)
    print()
def is_operation_supported(gc):
    """Ask the service whether the AddElements operation is supported."""
    # Is operation supported
    operation = 'uk.gov.gchq.gaffer.operation.impl.add.AddElements'
    result = gc.is_operation_supported(
        g.IsOperationSupported(operation=operation)
    )
    print(
        '\nOperation supported ("uk.gov.gchq.gaffer.operation.impl.add.AddElements"):')
    print(result)
    print()
def add_elements(gc):
# Add Elements
gc.execute_operation(
g.AddElements(
input=[
g.Entity(
group='JunctionUse',
vertex='M1:1',
properties={
'countByVehicleType': g.freq_map({
'BUS': 10,
'CAR': 50
}),
'endDate': g.date(1034319600000),
'count': g.long(60),
'startDate': g.date(1034316000000)
}
),
g.Edge(
group='RoadHasJunction',
source='M1',
destination='M1:1',
directed=True,
properties={}
)
]
)
)
print('Elements have been added')
print()
def get_elements(gc):
# Get Elements
input = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed('M5:10'),
# Edge input can be provided as follows
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.EITHER),
g.EdgeSeed('M5:10', 'M5:11', g.DirectedType.DIRECTED),
# Or you can use True or False for the direction
g.EdgeSeed('M5:10', 'M5:11', True)
],
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[],
transient_properties=[
g.Property('description', 'java.lang.String')
],
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['count'],
predicate=g.IsMoreThan(
value=g.long(1)
)
)
],
transform_functions=[
g.FunctionContext(
selection=['SOURCE', 'DESTINATION', 'count'],
function=g.Function(
class_name='uk.gov.gchq.gaffer.traffic.transform.DescriptionTransform'
),
projection=['description']
)
]
)
]
),
directed_type=g.DirectedType.EITHER
)
)
print('Related input')
print(input)
print()
def get_adj_seeds(gc):
# Adjacent Elements - chain 2 adjacent entities together
adj_seeds = gc.execute_operations(
[
g.GetAdjacentIds(
input=[
g.EntitySeed(
vertex='M5'
)
],
view=g.View(
edges=[
g.ElementDefinition(
'RoadHasJunction',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
'RoadUse',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
)
]
)
print('Adjacent entities - 2 hop')
print(adj_seeds)
print()
def get_all_elements(gc):
# Get all input, but limit the total results to 3
all_elements = gc.execute_operations(
operations=[
g.GetAllElements(),
g.Limit(result_limit=3)
]
)
print('All input (Limited to first 3)')
print(all_elements)
print()
def get_walks(gc):
# Get walks from M32 traversing down RoadHasJunction then JunctionLocatedAt
walks = gc.execute_operation(
g.GetWalks(
input=[
g.EntitySeed('M32'),
],
operations=[
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction'
)
]
)
),
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
group='JunctionLocatedAt'
)
]
)
)
]
)
)
print(
'Walks from M32 traversing down RoadHasJunction then JunctionLocatedAt')
print(walks)
print()
def generate_elements(gc):
# Generate Elements
input = gc.execute_operation(
g.GenerateElements(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.traffic.generator.RoadTrafficStringElementGenerator'
),
input=[
'"South West","E06000054","Wiltshire","6016","389200","179080","M4","LA Boundary","381800","180030","17","391646","179560","TM","E","2000","2000-05-03 00:00:00","7","0","9","2243","15","426","127","21","20","37","106","56","367","3060"'
]
)
)
print('Generated input from provided domain input')
print(input)
print()
def generate_domain_objs(gc):
# Generate Domain Objects - single provided element
input = gc.execute_operation(
g.GenerateObjects(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator'
),
input=[
g.Entity('entity', '1'),
g.Edge('edge', '1', '2', True)
]
)
)
print('Generated input from provided input')
print(input)
print()
def generate_domain_objects_chain(gc):
# Generate Domain Objects - chain of get input then generate input
input = gc.execute_operations(
[
g.GetElements(
input=[g.EntitySeed(vertex='M5')],
seed_matching_type=g.SeedMatchingType.RELATED,
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction',
group_by=[]
)
]
)
),
g.GenerateObjects(
element_generator=g.ElementGenerator(
class_name='uk.gov.gchq.gaffer.rest.example.ExampleDomainObjectGenerator'
)
)
]
)
print('Generated input from get input by seed')
print(input)
print()
def get_element_group_counts(gc):
# Get Elements
group_counts = gc.execute_operations([
g.GetElements(
input=[g.EntitySeed('M5')]
),
g.CountGroups(limit=1000)
])
print('Groups counts (limited to 1000 input)')
print(group_counts)
print()
def get_sub_graph(gc):
# Export and Get to/from an in memory set
entity_seeds = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('South West')],
include_incoming_out_going=g.InOutType.OUT
),
g.ExportToSet(),
g.GetAdjacentIds(include_incoming_out_going=g.InOutType.OUT),
g.ExportToSet(),
g.DiscardOutput(),
g.GetSetExport()
]
)
print('Export and Get to/from an in memory set')
print(entity_seeds)
print()
def export_to_gaffer_result_cache(gc):
# Export to Gaffer Result Cache and Get from Gaffer Result Cache
job_details = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('South West')],
include_incoming_out_going=g.InOutType.OUT
),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetJobDetails()
]
)
print('Export to Gaffer Result Cache. Job Details:')
print(job_details)
print()
job_id = job_details['jobId']
entity_seeds = gc.execute_operation(
g.GetGafferResultCacheExport(job_id=job_id),
)
print('Get Gaffer Result Cache Export.')
print(entity_seeds)
print()
def get_job_details(gc):
# Get all job details
job_details_initial = gc.execute_operations(
[
g.GetAdjacentIds(
input=[g.EntitySeed('1')],
),
g.ExportToGafferResultCache(),
g.DiscardOutput(),
g.GetJobDetails()
]
)
job_id = job_details_initial['jobId']
job_details = gc.execute_operation(
g.GetJobDetails(job_id=job_id),
)
print('Get job details')
print(job_details)
print()
def get_all_job_details(gc):
# Get all job details
all_job_details = gc.execute_operation(
g.GetAllJobDetails(),
)
print('Get all job details (just prints the first 3 results)')
print(all_job_details[:3])
print()
def delete_named_operation(gc):
gc.execute_operation(
g.DeleteNamedOperation('2-hop-with-limit')
)
print('Deleted named operation: 2-hop-with-limit')
print()
def add_named_operation(gc):
    """Register the '2-hop-with-limit' named operation.

    Overwrites any existing definition; the chain's result limit is a
    template parameter substituted at execution time.
    """
    gc.execute_operation(
        g.AddNamedOperation(
            operation_chain={
                "operations": [{
                    "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
                    "includeIncomingOutGoing": "OUTGOING"
                }, {
                    "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAdjacentIds",
                    "includeIncomingOutGoing": "OUTGOING"
                }, {
                    "class": "uk.gov.gchq.gaffer.operation.impl.Limit",
                    "resultLimit": "${param1}"
                }]
            },
            operation_name='2-hop-with-limit',
            description='2 hop query with limit',
            overwrite_flag=True,
            read_access_roles=["read-user"],
            write_access_roles=["write-user"],
            # ${param1} above is filled in from this parameter at run time.
            parameters=[
                g.NamedOperationParameter(
                    name="param1",
                    description="Limit param",
                    default_value=1,
                    value_class="java.lang.Long",
                    required=False
                )
            ]
        )
    )
    print('Added named operation: 2-hop-with-limit')
    print()
def get_all_named_operations(gc):
namedOperations = gc.execute_operation(
g.GetAllNamedOperations()
)
print('Named operations')
print(namedOperations)
print()
def named_operation(gc):
result = gc.execute_operation(
g.NamedOperation(
operation_name='2-hop-with-limit',
parameters={
'param1': 2
},
input=[
g.EntitySeed('M5')
]
)
)
print('Execute named operation')
print(result)
print()
def delete_named_views(gc):
gc.execute_operation(
g.DeleteNamedView(name='summarise')
)
print('Deleted named view: summarise')
gc.execute_operation(
g.DeleteNamedView(name='dateRange')
)
print('Deleted named view: dateRange')
print()
def add_named_view_summarise(gc):
gc.execute_operation(
g.AddNamedView(
view=g.View(
global_elements=[
g.GlobalElementDefinition(group_by=[])
]
),
name='summarise',
description='Summarises all results (overrides the groupBy to an empty array).',
overwrite_flag=True
)
)
print('Added named view: summarise')
print()
def add_named_view_date_range(gc):
gc.execute_operation(
g.AddNamedView(
view=g.View(
global_elements=g.GlobalElementDefinition(
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['startDate'],
predicate=g.InDateRange(
start='${start}',
end='${end}'
)
)
]
)
),
name='dateRange',
description='Filters results to a provided date range.',
overwrite_flag=True,
parameters=[
g.NamedViewParameter(
name="start",
description="A date string for the start of date range.",
value_class="java.lang.String",
required=False
),
g.NamedViewParameter(
name="end",
description="A date string for the end of the date range.",
value_class="java.lang.String",
required=False
)
]
)
)
print('Added named view: dateRange')
print()
def get_all_named_views(gc):
namedViews = gc.execute_operation(
g.GetAllNamedViews()
)
print('Named views')
print(namedViews)
print()
def named_view_summarise(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=g.NamedView(
name="summarise"
)
)
)
print('Execute get elements with summarised named view')
print(result)
print()
def named_view_date_range(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=g.NamedView(
name="dateRange",
parameters={
'start': '2005/05/03 06:00',
'end': '2005/05/03 09:00'
}
)
)
)
print('Execute get elements with date range named view')
print(result)
print()
def named_views(gc):
result = gc.execute_operation(
g.GetElements(
input=[
g.EntitySeed(
vertex='M32:1'
)
],
view=[
g.NamedView(
name="summarise"
),
g.NamedView(
name="dateRange",
parameters={
'start': '2005/05/03 06:00',
'end': '2005/05/03 09:00'
}
)
]
)
)
print('Execute get elements with summarised and date range named views')
print(result)
print()
def sort_elements(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Sort(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
],
result_limit=5
)
])
print('Sorted input')
print(input)
print()
def max_element(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Max(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
]
)
])
print('Max element')
print(input)
print()
def min_element(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetAllElements(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadUse',
group_by=[]
)
]
)
),
g.Min(
comparators=[
g.ElementPropertyComparator(
groups=['RoadUse'],
property='count'
)
]
)
])
print('Min element')
print(input)
print()
def to_vertices_to_entity_seeds(gc):
# Get sorted Elements
input = gc.execute_operations([
g.GetElements(
input=[
g.EntitySeed(
vertex='South West'
)
],
view=g.View(
edges=[
g.ElementDefinition(
'RegionContainsLocation',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.ToVertices(
edge_vertices=g.EdgeVertices.DESTINATION,
use_matched_vertex=g.UseMatchedVertex.OPPOSITE
),
g.ToEntitySeeds(),
g.GetElements(
view=g.View(
edges=[
g.ElementDefinition(
'LocationContainsRoad',
group_by=[]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.Limit(5)
])
print('ToVertices then ToEntitySeeds')
print(input)
print()
def complex_op_chain(gc):
# All road junctions in the South West that were heavily used by buses in year 2000.
junctions = gc.execute_operations(
operations=[
g.GetAdjacentIds(
input=[g.EntitySeed(vertex='South West')],
view=g.View(
edges=[
g.ElementDefinition(
group='RegionContainsLocation',
group_by=[]
)
]
)
),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
group='LocationContainsRoad',
group_by=[]
)
]
)
),
g.ToSet(),
g.GetAdjacentIds(
view=g.View(
edges=[
g.ElementDefinition(
group='RoadHasJunction',
group_by=[]
)
]
)
),
g.GetElements(
view=g.View(
entities=[
g.ElementDefinition(
group='JunctionUse',
group_by=[],
transient_properties=[
g.Property('busCount', 'java.lang.Long')
],
pre_aggregation_filter_functions=[
g.PredicateContext(
selection=['startDate'],
predicate=g.InDateRange(
start='2000/01/01',
end='2001/01/01'
)
)
],
post_aggregation_filter_functions=[
g.PredicateContext(
selection=['countByVehicleType'],
predicate=g.PredicateMap(
predicate=g.IsMoreThan(
value={'java.lang.Long': 1000},
or_equal_to=False
),
key='BUS'
)
)
],
transform_functions=[
g.FunctionContext(
selection=['countByVehicleType'],
function=g.FreqMapExtractor(key='BUS'),
projection=['busCount']
)
]
)
]
),
include_incoming_out_going=g.InOutType.OUT
),
g.ToCsv(
element_generator=g.CsvGenerator(
fields={
'VERTEX': 'Junction',
'busCount': 'Bus Count'
},
quoted=False
),
include_header=True
)
]
)
print(
'All road junctions in the South West that were heavily used by buses in year 2000.')
print(junctions)
print()
def op_chain_in_json(gc):
    """Execute an operation chain supplied as raw JSON rather than
    built from the gafferpy helper classes."""
    # Operation chain defined in json
    result = gc.execute_operation_chain(
        {
            "class": "uk.gov.gchq.gaffer.operation.OperationChain",
            "operations": [{
                "class": "uk.gov.gchq.gaffer.operation.impl.get.GetAllElements",
            }, {
                "class": "uk.gov.gchq.gaffer.operation.impl.CountGroups"
            }]
        }
    )
    print('Operation chain defined in json')
    print(result)
    print()
if __name__ == "__main__":
    # Default to a locally running Gaffer REST service, non-verbose.
    run('http://localhost:8080/rest/latest', False)
# source: gchq/gaffer-tools | python-shell/src/example.py | Python | apache-2.0 | 26,676 bytes
#coding:utf-8
import os
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.plugins import PluginManager
from mailpile.crypto.gpgi import GnuPG
from mailpile.vcard import *
_plugins = PluginManager(builtin=__file__)
# User default GnuPG key file
DEF_GNUPG_HOME = os.path.expanduser('~/.gnupg')
class GnuPGImporter(VCardImporter):
    """VCardImporter that mines contacts out of the local GnuPG keyring.

    Python 2 code (uses dict.iteritems). One vCard is produced per set
    of e-mail addresses, so multiple UIDs on one key share a card.
    """
    FORMAT_NAME = 'GnuPG'
    FORMAT_DESCRIPTION = _('Import contacts from GnuPG keyring')
    SHORT_NAME = 'gpg'
    CONFIG_RULES = {
        'active': [_('Enable this importer'), bool, True],
        'gpg_home': [_('Location of keyring'), 'path', DEF_GNUPG_HOME],
    }
    # vCard KEY line payload template; %s is the key fingerprint/id.
    VCL_KEY_FMT = 'data:application/x-pgp-fingerprint,%s'
    MERGE_BY = ['key', 'email']  # Merge by Key ID first, email if that fails
    UPDATE_INDEX = True  # Update the index's email->name mapping
    def get_vcards(self):
        """Build one MailpileVCard per usable key in the keyring."""
        if not self.config.active:
            return []
        gnupg = GnuPG(self.session.config)
        keys = gnupg.list_keys()
        results = []
        vcards = {}
        for key_id, key in keys.iteritems():
            # Skip keys we could never mail with: disabled, revoked, or
            # missing either the encrypt or sign capability.
            if (key.get("disabled") or key.get("revoked") or
                    not key["capabilities_map"].get("encrypt") or
                    not key["capabilities_map"].get("sign")):
                continue
            vcls = [VCardLine(name='KEY', value=self.VCL_KEY_FMT % key_id)]
            card = None
            emails = []
            for uid in key.get('uids', []):
                if uid.get('email'):
                    vcls.append(VCardLine(name='email', value=uid['email']))
                    # Reuse an existing card if another key already
                    # claimed this address.
                    card = card or vcards.get(uid['email'])
                    emails.append(uid['email'])
                if uid.get('name'):
                    name = uid['name']
                    vcls.append(VCardLine(name='fn', value=name))
            if card and emails:
                card.add(*vcls)
            elif emails:
                # This is us taking care to only create one card for each
                # set of e-mail addresses.
                card = MailpileVCard(*vcls)
                for email in emails:
                    vcards[email] = card
                results.append(card)
        return results
_plugins.register_vcard_importers(GnuPGImporter)
| jparyani/Mailpile | mailpile/plugins/vcard_gnupg.py | Python | apache-2.0 | 2,295 |
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the (already equalised) image as greyscale and plot its intensity
# histogram together with the scaled cumulative distribution function.
img = cv2.imread('Fear199eq.png', 0)

counts, bin_edges = np.histogram(img.flatten(), 256, [0, 256])
cdf = counts.cumsum()
# Scale the CDF so it overlays nicely on the histogram axis.
cdf_scaled = cdf * counts.max() / cdf.max()

plt.plot(cdf_scaled, color='b')
plt.hist(img.flatten(), 256, [0, 256], color='r')
plt.xlim([0, 256])
plt.legend(('cdf', 'histogram'), loc='upper left')
plt.savefig('Fear199eqhist.png')
| alee156/NeuroCV | Module1/opencv/histeq/histeqprep.py | Python | apache-2.0 | 422 |
from flask import url_for
from arrested import ArrestedAPI, Resource, Endpoint
def initialise_app_via_constructor(app):
    """Passing the Flask app straight to ArrestedAPI() binds it immediately."""
    api = ArrestedAPI(app)
    assert api.app == app
def defer_app_initialisation(app):
    """init_app() may be called later to bind a Flask app to the API."""
    api = ArrestedAPI()
    api.init_app(app)
    assert api.app == app
def test_register_resource(app):
    """Registering a Resource records it as a blueprint on the Flask app."""
    api = ArrestedAPI(app)
    resource = Resource('example', __name__, url_prefix='/example')
    api.register_resource(resource)
    assert app.blueprints == {'example': resource}
def test_register_all(app):
    """register_all() registers every Resource in the list as a blueprint."""
    api = ArrestedAPI(app)
    first = Resource('example', __name__, url_prefix='/example')
    second = Resource('example_2', __name__, url_prefix='/example-2')
    api.register_all([first, second])
    assert app.blueprints == {
        'example': first,
        'example_2': second
    }
def test_defer_resource_registration(app):
    """Resources registered with defer=True only become blueprints once
    init_app() binds the Flask app."""
    api = ArrestedAPI()
    first = Resource('example', __name__, url_prefix='/example')
    second = Resource('example_2', __name__, url_prefix='/example-2')
    api.register_resource(first, defer=True)
    api.register_resource(second, defer=True)
    # Nothing registered yet: the API has no app to attach blueprints to.
    assert app.blueprints == {}
    api.init_app(app)
    assert app.blueprints == {
        'example': first,
        'example_2': second
    }
def test_register_resource_with_url_prefix(app):
    """The API-level url_prefix is prepended to each resource's own prefix."""
    api = ArrestedAPI(app, url_prefix='/v1')
    resource = Resource('example', __name__, url_prefix='/example')

    class MyEndpoint(Endpoint):
        name = 'test'

    resource.add_endpoint(MyEndpoint)
    api.register_resource(resource)
    assert url_for('example.test') == '/v1/example'
def test_api_request_middleware(app, client):
    """API-level and resource-level hooks wrap the request in onion order.

    Expected sequence: api_before -> resource_before -> endpoint handler ->
    resource_after -> api_after, with each after-hook appending a marker to
    the response body.
    """
    evts = []
    def api_before_func(*args, **kwarsg):
        evts.append('api_before')
        return None
    def api_after_func(endpoint, response):
        response.data += b'|api_after'
        evts.append('api_after')
        return response
    def resource_before_func(endpoint):
        evts.append('resource_before')
        return None
    def resource_after_func(endpoint, response):
        response.data += b'|resource_after'
        evts.append('resource_after')
        return response
    api_v1 = ArrestedAPI(
        app,
        url_prefix='/v1',
        before_all_hooks=[api_before_func],
        after_all_hooks=[api_after_func]
    )
    example_resource = Resource(
        'example', __name__,
        url_prefix='/example',
        before_all_hooks=[resource_before_func],
        after_all_hooks=[resource_after_func]
    )
    class MyEndpoint(Endpoint):
        name = 'test'
        def get(self, *args, **kwargs):
            # Both before-hooks must have fired by the time the handler
            # runs, and neither after-hook may have fired yet.
            assert 'api_before' in evts
            assert 'api_after' not in evts
            assert 'resource_before' in evts
            assert 'resource_after' not in evts
            return 'request'
    example_resource.add_endpoint(MyEndpoint)
    api_v1.register_resource(
        example_resource,
    )
    resp = client.get(url_for('example.test'))
    # After-hooks ran innermost (resource) first, then the API-level one.
    assert resp.data == b'request|resource_after|api_after'
    assert evts == ['api_before', 'resource_before', 'resource_after', 'api_after']
def test_api_request_middleware_limited_to_api(app, client):
    """Hooks attached to one ArrestedAPI must not run for another API's
    resources.

    api_v1 carries before/after hooks; api_v2 has none.  A request routed to
    the resource registered on api_v2 must trigger no hooks and leave the
    response body unmodified.
    """
    evts = []
    def api_before_func(*args, **kwarsg):
        evts.append('api_before')
        return None
    def api_after_func(endpoint, response):
        response.data += b'|api_after'
        evts.append('api_after')
        return response
    def resource_before_func(endpoint):
        evts.append('resource_before')
        return None
    def resource_after_func(endpoint, response):
        response.data += b'|resource_after'
        evts.append('resource_after')
        return response
    api_v1 = ArrestedAPI(
        app,
        url_prefix='/v1',
        before_all_hooks=[api_before_func],
        after_all_hooks=[api_after_func]
    )
    api_v2 = ArrestedAPI(
        app,
        url_prefix='/v2',
    )
    example_resource = Resource(
        'example', __name__,
        url_prefix='/example',
        before_all_hooks=[resource_before_func],
        after_all_hooks=[resource_after_func]
    )
    example2_resource = Resource(
        'example2', __name__,
        url_prefix='/example2'
    )
    class MyEndpoint(Endpoint):
        name = 'test'
        def get(self, *args, **kwargs):
            # No hooks should have fired for a request routed via api_v2.
            assert 'api_before' not in evts
            assert 'api_after' not in evts
            assert 'resource_before' not in evts
            assert 'resource_after' not in evts
            return 'request'
    example_resource.add_endpoint(MyEndpoint)
    example2_resource.add_endpoint(MyEndpoint)
    api_v1.register_resource(
        example_resource,
    )
    api_v2.register_resource(
        example2_resource,
    )
    resp = client.get(url_for('example2.test'))
    assert resp.data == b'request'
| mikeywaites/flask-arrested | tests/test_api.py | Python | mit | 5,699 |
# -*- encoding: utf-8 -*-
from django.conf.urls import url
from .views import (
ContactInvoiceListView,
ContactTimeRecordListView,
invoice_download,
InvoiceContactCreateView,
InvoiceContactUpdateView,
InvoiceDetailView,
InvoiceDraftCreateView,
InvoiceLineCreateView,
InvoiceLineUpdateView,
InvoiceListView,
InvoicePdfUpdateView,
InvoiceRefreshTimeRecordsUpdateView,
InvoiceRemoveTimeRecordsUpdateView,
InvoiceSetToDraftUpdateView,
InvoiceTimeCreateView,
InvoiceUpdateView,
InvoiceUserUpdateView,
QuickTimeRecordCreateView,
QuickTimeRecordDeleteView,
QuickTimeRecordListView,
QuickTimeRecordUpdateView,
report_invoice_time_analysis,
report_invoice_time_analysis_csv,
TicketListMonthView,
TicketTimeRecordListView,
TimeRecordCreateView,
TimeRecordListView,
TimeRecordSummaryUserView,
TimeRecordSummaryView,
TimeRecordUpdateView,
UserTimeRecordListView,
)
# URL routes for the invoice app: per-contact settings/listings, invoice
# creation and detail, PDF/report generation, quick time records, user
# settings, tickets and time-record summaries.
urlpatterns = [
    # per-contact invoice settings and listings
    url(regex=r'^contact/(?P<pk>\d+)/create/$',
        view=InvoiceContactCreateView.as_view(),
        name='invoice.contact.create'
    ),
    url(regex=r'^contact/(?P<pk>\d+)/update/$',
        view=InvoiceContactUpdateView.as_view(),
        name='invoice.contact.update'
    ),
    url(regex=r'^contact/(?P<pk>\d+)/invoice/$',
        view=ContactInvoiceListView.as_view(),
        name='invoice.contact.list'
    ),
    url(regex=r'^contact/(?P<pk>\d+)/time/$',
        view=ContactTimeRecordListView.as_view(),
        name='invoice.time.contact.list'
    ),
    # invoice creation (empty draft, or built from time records)
    url(regex=r'^create/(?P<pk>\d+)/draft/$',
        view=InvoiceDraftCreateView.as_view(),
        name='invoice.create.draft'
    ),
    url(regex=r'^create/(?P<pk>\d+)/time/$',
        view=InvoiceTimeCreateView.as_view(),
        name='invoice.create.time'
    ),
    # invoice listing, download and detail
    url(regex=r'^$',
        view=InvoiceListView.as_view(),
        name='invoice.list'
    ),
    url(regex=r'^download/(?P<pk>\d+)/$',
        view=invoice_download,
        name='invoice.download'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/$',
        view=InvoiceDetailView.as_view(),
        name='invoice.detail'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/line/create/$',
        view=InvoiceLineCreateView.as_view(),
        name='invoice.line.create'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/line/update/$',
        view=InvoiceLineUpdateView.as_view(),
        name='invoice.line.update'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/pdf/$',
        view=InvoicePdfUpdateView.as_view(),
        name='invoice.create.pdf'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/refresh-time-records/$',
        view=InvoiceRefreshTimeRecordsUpdateView.as_view(),
        name='invoice.refresh.time.records'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/remove-time-records/$',
        view=InvoiceRemoveTimeRecordsUpdateView.as_view(),
        name='invoice.remove.time.records'
    ),
    # invoice time-analysis reports (HTML and CSV)
    url(regex=r'^invoice/(?P<pk>\d+)/report/$',
        view=report_invoice_time_analysis,
        name='invoice.report.time.analysis'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/csv/$',
        view=report_invoice_time_analysis_csv,
        name='invoice.report.time.analysis_csv'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/set-to-draft/$',
        view=InvoiceSetToDraftUpdateView.as_view(),
        name='invoice.set.to.draft'
    ),
    url(regex=r'^invoice/(?P<pk>\d+)/update/$',
        view=InvoiceUpdateView.as_view(),
        name='invoice.update'
    ),
    # quick time record
    url(regex=r'^quick/time/record/$',
        view=QuickTimeRecordListView.as_view(),
        name='invoice.quick.time.record.list'
    ),
    url(regex=r'^quick/time/record/create/$',
        view=QuickTimeRecordCreateView.as_view(),
        name='invoice.quick.time.record.create'
    ),
    url(regex=r'^quick/time/record/(?P<pk>\d+)/update/$',
        view=QuickTimeRecordUpdateView.as_view(),
        name='invoice.quick.time.record.update'
    ),
    url(regex=r'^quick/time/record/(?P<pk>\d+)/delete/$',
        view=QuickTimeRecordDeleteView.as_view(),
        name='invoice.quick.time.record.delete'
    ),
    # settings
    url(regex=r'^user/update/$',
        view=InvoiceUserUpdateView.as_view(),
        name='invoice.user.update'
    ),
    # ticket
    url(regex=r'^ticket/(?P<year>\d{4})/(?P<month>\d+)/$',
        view=TicketListMonthView.as_view(),
        name='invoice.ticket.list.month'
    ),
    url(regex=r'^ticket/(?P<pk>\d+)/time/$',
        view=TicketTimeRecordListView.as_view(),
        name='invoice.time.ticket.list'
    ),
    url(regex=r'^user/time/$',
        view=UserTimeRecordListView.as_view(),
        name='invoice.time.user.list'
    ),
    url(regex=r'^ticket/(?P<pk>\d+)/time/add/$',
        view=TimeRecordCreateView.as_view(),
        name='invoice.time.create'
    ),
    # time records and summaries
    url(regex=r'^time/$',
        view=TimeRecordListView.as_view(),
        name='invoice.time'
    ),
    url(regex=r'^time/summary/$',
        view=TimeRecordSummaryView.as_view(),
        name='invoice.time.summary'
    ),
    url(regex=r'^time/summary/(?P<pk>\d+)/$',
        view=TimeRecordSummaryUserView.as_view(),
        name='invoice.time.summary.user'
    ),
    url(regex=r'^time/(?P<pk>\d+)/update/$',
        view=TimeRecordUpdateView.as_view(),
        name='invoice.time.update'
    ),
]
| pkimber/invoice | invoice/urls.py | Python | apache-2.0 | 5,453 |
""" Useful and shared functions """
import os
import math
import numpy as np
from scipy.interpolate import interp1d
def chunks(l, size):
    """Split sequence *l* into consecutive slices of at most *size* items."""
    return [l[start:start + size] for start in range(0, len(l), size)]
def findLine(filename, s):
    """Return the first line of *filename* containing substring *s*.

    The line keeps its trailing newline; '' is returned if no line matches.
    """
    with open(filename, "r") as handle:
        for line in handle:
            if s in line:
                return line
    return ''
def findMean(l):
    """Return the arithmetic mean of non-empty list *l*.

    Uses math.fsum for an accurate floating-point summation.
    """
    total = math.fsum(l)
    return total / len(l)
def replaceAll(text, reps):
    """Apply every old->new substitution in mapping *reps* to *text*.

    Keys and values are coerced to str before replacing.
    """
    for old, new in reps.items():
        text = text.replace(str(old), str(new))
    return text
def ntabulate(matrix):
    """Format *matrix* as aligned, space-separated columns.

    Each column is padded to the width of its widest cell.
    """
    rows = [[str(cell) for cell in row] for row in matrix]
    widths = [len(max(column, key=len)) for column in zip(*rows)]
    row_fmt = ' '.join('{{:{}}}'.format(width) for width in widths)
    return '\n'.join(row_fmt.format(*row) for row in rows)
def float2str(prec, val):
    """Format *val* with exactly *prec* digits after the decimal point."""
    return format(val, '.{}f'.format(prec))
def nexit():
    """Standard exit program function.

    Prints a notice and raises SystemExit (rather than calling os._exit)
    so callers can still intercept the shutdown if needed.
    """
    print('Exiting program...')
    raise SystemExit
def seconds2str(s):
    """Format a duration in seconds as 'Hh Mm Ss' (hours unbounded)."""
    secs = int(s % 60)
    mins = int(s / 60) % 60
    hrs = int(s / 3600)
    return '{}h {}m {}s'.format(hrs, mins, secs)
def modFile(new, tmp, reps):
    """Copy template file *tmp* to *new*, applying replaceAll(reps) per line."""
    with open(new, 'w+') as dst, open(tmp, 'r') as src:
        for line in src:
            dst.write(replaceAll(line, reps))
def getDOS(filePath, spin):
    """Parse a Grace (.agr) DOS file and return [[energy, dos], ...] pairs.

    The spin-up data block starts at '@target G0.S0' and the spin-down
    block at '@target G1.S0'; a bare '&' line terminates a block.  The
    extracted pairs are also written to '<basename>_<spin>.txt' next to
    the input file, replacing any previous copy.

    Raises SystemExit (via nexit) for any spin other than 'up'/'down'.
    """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    filename = os.path.basename(filePath).split('.')[0]
    outFile = os.path.join(baseDir, filename + '_' + spin + '.txt')
    if spin == 'up':
        dataStart = '@target G0.S0'
    elif spin == 'down':
        dataStart = '@target G1.S0'
    else:
        print('Incorrect spin.')
        nexit()
    dos = []
    record = False
    with open(filePath) as f:
        for raw in f:
            line = raw.rstrip()
            if line == dataStart:
                record = True
                continue
            if line == '&':
                record = False
                continue
            # Skip any remaining Grace directives inside the data block.
            if record and '@' not in line:
                fields = line.split()  # split once instead of twice
                dos.append([float(fields[0]), float(fields[1])])
    if os.path.isfile(outFile):
        os.remove(outFile)
    with open(outFile, 'a+') as f:
        for x, y in dos:
            f.write(str(x) + ' ' + str(y) + '\n')
    return dos
def getBSF3D(filePath, spin, numSites):
    """Store into text file and return 3D BSF data.

    NOTE(review): the post-processing loop below appears broken as
    written -- see inline notes.  Verify against real data before
    relying on this function.
    """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    bsfnum = os.path.basename(filePath).split('_')[-2]
    if bsfnum.isdigit() == True:
        outFile = os.path.join(baseDir, bsfnum + '_bsf3d_' + spin + '.txt')
    else:
        outFile = os.path.join(baseDir, 'bsf3d_' + spin + '.txt')
    raw = []
    hashCount = 0 # For determining when to start reading raw data.
    # Get raw data first.
    with open(filePath) as f:
        for l in f:
            line = l.rstrip()
            if '###' in line:
                hashCount += 1
                continue
            # Data starts after the third '###' marker line.
            if hashCount == 3:
                x = float(line.split()[0])
                y = float(line.split()[1])
                raw.append([x, y])
    # Generate plotable data from raw
    numUseful = (numSites - 1) * 2
    # NOTE(review): on Python 3 this is float division, making range(nk)
    # below raise TypeError -- presumably '//' was intended.
    nk = len(raw) / numUseful
    nk2 = nk * nk
    bsf = [[] for i in range(nk)]
    if spin == 'up':
        sign = -1
    elif spin == 'down':
        sign = 1
    for i in range(nk2):
        # NOTE(review): math.floor returns a float on Python 2, so bsf[n]
        # would fail as a list index -- confirm intended types.
        n = math.floor(i / nk)
        j = i + (nk2 * numUseful - 2)
        # NOTE(review): raw[i] is an [x, y] pair, so float(raw[i]) raises
        # TypeError -- possibly raw[i][1] was intended.  Also j can exceed
        # len(raw) for large i; confirm the indexing against the layout of
        # the raw data.
        bsf[n].append(float(raw[i]) + sign * float(raw[j]))
    if os.path.isfile(outFile) is True:
        os.remove(outFile)
    np.savetxt(outFile, bsf)
    return bsf
def getBSF2D(filePath, spin, numSites):
    """Store into a text file and return a single strip of BSF data.

    Takes the first row of the 3D BSF data and pairs each value with its
    normalised k index.
    """
    baseDir = os.path.dirname(os.path.abspath(filePath))
    bsfnum = os.path.basename(filePath).split('_')[-2]
    if bsfnum.isdigit():
        outFile = os.path.join(baseDir, bsfnum + '_bsf2d_' + spin + '.txt')
    else:
        outFile = os.path.join(baseDir, 'bsf2d_' + spin + '.txt')
    full = getBSF3D(filePath, spin, numSites)
    nk = len(full)
    bsf = [[i / nk, full[0][i]] for i in range(nk)]
    if os.path.isfile(outFile):
        os.remove(outFile)
    with open(outFile, 'a+') as f:
        for x, y in bsf:
            f.write(str(x) + ' ' + str(y) + '\n')
    return bsf
def getInterp1d(data):
    """Build a scipy interp1d interpolator from [x, y] pairs in *data*."""
    xs = [pair[0] for pair in data]
    ys = [pair[1] for pair in data]
    return interp1d(xs, ys)
def normalise(inp):
    """Linearly rescale the values in *inp* onto the range [0, 1].

    The minimum maps to 0 and the maximum to 1; all values equal would
    raise ZeroDivisionError.
    """
    lo = min(inp)
    hi = max(inp)
    return [(value - lo) / (hi - lo) for value in inp]
| nelsyeung/half-metals-analysis | lib/nmod.py | Python | mit | 5,429 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
authorize = extensions.extension_authorizer('compute', 'hypervisors')
def make_hypervisor(elem, detail):
    """Declare hypervisor attributes on an XML template element.

    The identity attributes are always set; when ``detail`` is true the
    capacity/usage attributes and a nested <service> sub-element are added
    as well.  The order of set() calls is kept as-is.
    """
    elem.set('hypervisor_hostname')
    elem.set('id')
    elem.set('state')
    elem.set('status')
    if detail:
        elem.set('vcpus')
        elem.set('memory_mb')
        elem.set('local_gb')
        elem.set('vcpus_used')
        elem.set('memory_mb_used')
        elem.set('local_gb_used')
        elem.set('hypervisor_type')
        elem.set('hypervisor_version')
        elem.set('free_ram_mb')
        elem.set('free_disk_gb')
        elem.set('current_workload')
        elem.set('running_vms')
        elem.set('cpu_info')
        elem.set('disk_available_least')
        elem.set('host_ip')
        service = xmlutil.SubTemplateElement(elem, 'service',
                                             selector='service')
        service.set('id')
        service.set('host')
        service.set('disabled_reason')
class HypervisorIndexTemplate(xmlutil.TemplateBuilder):
    """XML template for the summary (index) hypervisor listing."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                          selector='hypervisors')
        make_hypervisor(elem, False)
        return xmlutil.MasterTemplate(root, 1)
class HypervisorDetailTemplate(xmlutil.TemplateBuilder):
    """XML template for the detailed hypervisor listing."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                          selector='hypervisors')
        make_hypervisor(elem, True)
        return xmlutil.MasterTemplate(root, 1)
class HypervisorTemplate(xmlutil.TemplateBuilder):
    """XML template for a single hypervisor (detail view)."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
        make_hypervisor(root, True)
        return xmlutil.MasterTemplate(root, 1)
class HypervisorUptimeTemplate(xmlutil.TemplateBuilder):
    """XML template for a hypervisor summary plus its uptime attribute."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
        make_hypervisor(root, False)
        root.set('uptime')
        return xmlutil.MasterTemplate(root, 1)
class HypervisorServersTemplate(xmlutil.TemplateBuilder):
    """XML template for hypervisors with their hosted servers listed."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisors')
        elem = xmlutil.SubTemplateElement(root, 'hypervisor',
                                          selector='hypervisors')
        make_hypervisor(elem, False)
        # Nested <servers><server name=... uuid=...> elements.
        servers = xmlutil.SubTemplateElement(elem, 'servers')
        server = xmlutil.SubTemplateElement(servers, 'server',
                                            selector='servers')
        server.set('name')
        server.set('uuid')
        return xmlutil.MasterTemplate(root, 1)
class HypervisorStatisticsTemplate(xmlutil.TemplateBuilder):
    """XML template for the aggregated hypervisor statistics view."""
    def construct(self):
        root = xmlutil.TemplateElement('hypervisor_statistics',
                                       selector='hypervisor_statistics')
        root.set('count')
        root.set('vcpus')
        root.set('memory_mb')
        root.set('local_gb')
        root.set('vcpus_used')
        root.set('memory_mb_used')
        root.set('local_gb_used')
        root.set('free_ram_mb')
        root.set('free_disk_gb')
        root.set('current_workload')
        root.set('running_vms')
        root.set('disk_available_least')
        return xmlutil.MasterTemplate(root, 1)
class HypervisorsController(object):
    """The Hypervisors API controller for the OpenStack API."""
    def __init__(self, ext_mgr):
        self.host_api = compute.HostAPI()
        self.servicegroup_api = servicegroup.API()
        super(HypervisorsController, self).__init__()
        self.ext_mgr = ext_mgr
    def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
        """Build the API dict view of a compute node.

        state/status are added when the os-hypervisor-status extension is
        loaded; full capacity fields (plus host_ip with
        os-extended-hypervisors) when ``detail`` is true and no server
        list is supplied; a server name/uuid list when ``servers`` is
        given.  Extra ``kwargs`` are merged in verbatim.
        """
        hyp_dict = {
            'id': hypervisor['id'],
            'hypervisor_hostname': hypervisor['hypervisor_hostname'],
        }
        ext_status_loaded = self.ext_mgr.is_loaded('os-hypervisor-status')
        if ext_status_loaded:
            alive = self.servicegroup_api.service_is_up(hypervisor['service'])
            hyp_dict['state'] = 'up' if alive else "down"
            hyp_dict['status'] = (
                'disabled' if hypervisor['service']['disabled'] else 'enabled')
        if detail and not servers:
            fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
                      'memory_mb_used', 'local_gb_used',
                      'hypervisor_type', 'hypervisor_version',
                      'free_ram_mb', 'free_disk_gb', 'current_workload',
                      'running_vms', 'cpu_info', 'disk_available_least')
            ext_loaded = self.ext_mgr.is_loaded('os-extended-hypervisors')
            if ext_loaded:
                fields += ('host_ip',)
            for field in fields:
                hyp_dict[field] = hypervisor[field]
            hyp_dict['service'] = {
                'id': hypervisor['service_id'],
                'host': hypervisor['service']['host'],
            }
            if ext_status_loaded:
                hyp_dict['service'].update(
                    disabled_reason=hypervisor['service']['disabled_reason'])
        if servers:
            hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
                                   for serv in servers]
        # Add any additional info
        if kwargs:
            hyp_dict.update(kwargs)
        return hyp_dict
    @wsgi.serializers(xml=HypervisorIndexTemplate)
    def index(self, req):
        """List all hypervisors (summary view)."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        req.cache_db_compute_nodes(compute_nodes)
        return dict(hypervisors=[self._view_hypervisor(hyp, False)
                                 for hyp in compute_nodes])
    @wsgi.serializers(xml=HypervisorDetailTemplate)
    def detail(self, req):
        """List all hypervisors with full capacity/usage details."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_get_all(context)
        req.cache_db_compute_nodes(compute_nodes)
        return dict(hypervisors=[self._view_hypervisor(hyp, True)
                                 for hyp in compute_nodes])
    @wsgi.serializers(xml=HypervisorTemplate)
    def show(self, req, id):
        """Show the detail view of one hypervisor; 404 if not found."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
            req.cache_db_compute_node(hyp)
        except (ValueError, exception.ComputeHostNotFound):
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        return dict(hypervisor=self._view_hypervisor(hyp, True))
    @wsgi.serializers(xml=HypervisorUptimeTemplate)
    def uptime(self, req, id):
        """Show a hypervisor summary plus the uptime reported by its host."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            hyp = self.host_api.compute_node_get(context, id)
            req.cache_db_compute_node(hyp)
        except (ValueError, exception.ComputeHostNotFound):
            msg = _("Hypervisor with ID '%s' could not be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        # Get the uptime
        try:
            host = hyp['service']['host']
            uptime = self.host_api.get_host_uptime(context, host)
        except NotImplementedError:
            msg = _("Virt driver does not implement uptime function.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        return dict(hypervisor=self._view_hypervisor(hyp, False,
                                                     uptime=uptime))
    @wsgi.serializers(xml=HypervisorIndexTemplate)
    def search(self, req, id):
        """List hypervisors whose hostname matches *id*; 404 if none."""
        context = req.environ['nova.context']
        authorize(context)
        hypervisors = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if hypervisors:
            return dict(hypervisors=[self._view_hypervisor(hyp, False)
                                     for hyp in hypervisors])
        else:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
    @wsgi.serializers(xml=HypervisorServersTemplate)
    def servers(self, req, id):
        """List matching hypervisors together with the servers they host."""
        context = req.environ['nova.context']
        authorize(context)
        compute_nodes = self.host_api.compute_node_search_by_hypervisor(
            context, id)
        if not compute_nodes:
            msg = _("No hypervisor matching '%s' could be found.") % id
            raise webob.exc.HTTPNotFound(explanation=msg)
        hypervisors = []
        for compute_node in compute_nodes:
            instances = self.host_api.instance_get_all_by_host(context,
                compute_node['service']['host'])
            hyp = self._view_hypervisor(compute_node, False, instances)
            hypervisors.append(hyp)
        return dict(hypervisors=hypervisors)
    @wsgi.serializers(xml=HypervisorStatisticsTemplate)
    def statistics(self, req):
        """Return aggregated statistics over all compute nodes."""
        context = req.environ['nova.context']
        authorize(context)
        stats = self.host_api.compute_node_statistics(context)
        return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
    """Admin-only hypervisor administration."""
    name = "Hypervisors"
    alias = "os-hypervisors"
    namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
    updated = "2012-06-21T00:00:00Z"
    def get_resources(self):
        """Expose /os-hypervisors with detail/statistics collection actions
        and uptime/search/servers member actions."""
        resources = [extensions.ResourceExtension('os-hypervisors',
                HypervisorsController(self.ext_mgr),
                collection_actions={'detail': 'GET',
                                    'statistics': 'GET'},
                member_actions={'uptime': 'GET',
                                'search': 'GET',
                                'servers': 'GET'})]
        return resources
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/hypervisors.py | Python | gpl-2.0 | 10,930 |
from django.db import models
from edc_base.model_managers import HistoricalRecords
from edc_base.model_fields import OtherCharField
from edc_constants.choices import YES_NO
from ..choices import KEPT_APPT, TYPE_OF_EVIDENCE, REASON_RECOMMENDED
from .model_mixins import CrfModelMixin
class HivLinkageToCare (CrfModelMixin):
    """CRF capturing whether a participant linked to HIV care since the
    previous visit: referral-appointment attendance, ART recommendation
    and ART initiation, each with supporting evidence fields.
    """
    # --- referral appointment follow-up ---
    kept_appt = models.CharField(
        verbose_name=(
            'When we last saw you in {previous} we scheduled an appointment '
            'for you in an HIV care clinic on {referral_appt_date}. '
            'Did you keep that appointment?'),
        max_length=50,
        choices=KEPT_APPT,
        null=True,
        help_text='')
    different_clinic = models.CharField(
        verbose_name='If went to a different clinic, specify the clinic',
        default=None,
        null=True,
        blank=True,
        max_length=50,
        help_text=''
    )
    failed_attempt_date = models.DateField(
        verbose_name=(
            'If you tried to attend an HIV care clinic and '
            'left before you saw a healthcare provider, specify the date?'),
        default=None,
        null=True,
        blank=True,
        help_text=''
    )
    first_attempt_date = models.DateField(
        verbose_name=('What was the date when you first went '
                      'to the community_name clinic?'),
        default=None,
        null=True,
        blank=True,
        help_text=''
    )
    evidence_referral = models.CharField(
        verbose_name='Type of Evidence:',
        max_length=50,
        choices=TYPE_OF_EVIDENCE,
        null=True,
        help_text='')
    evidence_referral_other = OtherCharField()
    # --- ART recommendation (ART-naive or defaulter at last interview) ---
    recommended_art = models.CharField(
        verbose_name=(
            '[IF PERSON WAS ART NAIVE OR A DEFAULTER AT LAST INTERVIEW] '
            'Since the last time we spoke with '
            'you on last_visit_date, has a doctor/nurse or '
            'other healthcare worker recommended '
            'that you start antiretroviral therapy (ARVs), a '
            'combination of medicines to treat your HIV infection?'),
        max_length=50,
        choices=YES_NO,
        null=True,
        help_text='If No [SKIP TO #10]')
    reason_recommended_art = models.CharField(
        verbose_name='If yes, do you know why ARVs were recommended?',
        max_length=50,
        choices=REASON_RECOMMENDED,
        null=True,
        blank=True,
        help_text='')
    reason_recommended_art_other = OtherCharField()
    # --- ART initiation details ---
    initiated = models.CharField(
        verbose_name=(
            '[IF PERSON WAS ART NAIVE OR A DEFAULTER AT LAST INTERVIEW] '
            'Did you [start/restart] ART since we '
            'spoke with you on last_visit_date?'),
        max_length=50,
        choices=YES_NO,
        null=True,
        help_text='')
    initiated_date = models.DateField(
        verbose_name='When did you start/restart ART?',
        default=None,
        null=True,
        blank=True,
        help_text=''
    )
    initiated_clinic = models.CharField(
        verbose_name='Which clinic facility did you start/restart ART at?',
        max_length=25,
        null=True,
        blank=True,
        help_text='Indicate the name of the clinic')
    initiated_clinic_community = models.CharField(
        verbose_name=('In which community is this clinic located'),
        null=True,
        blank=True,
        max_length=50,
        help_text='Indicate the community name'
    )
    evidence_art = models.CharField(
        verbose_name='Type of Evidence:',
        max_length=50,
        choices=TYPE_OF_EVIDENCE,
        null=True,
        blank=True,
        help_text='')
    evidence_art_other = OtherCharField()
    # Full audit history of edits to this CRF.
    history = HistoricalRecords()
    class Meta(CrfModelMixin.Meta):
        app_label = 'bcpp_subject'
        verbose_name = 'Hiv Linkage To Care'
        verbose_name_plural = 'Hiv Linkage To Care'
| botswana-harvard/bcpp-subject | bcpp_subject/models/hiv_linkage_to_care.py | Python | gpl-3.0 | 3,927 |
from .defaults import DEFAULT_LOG_FILE
from twisted.logger import (
ILogObserver, jsonFileLogObserver, FilteringLogObserver,
LogLevelFilterPredicate, LogLevel, globalLogPublisher
)
from zope.interface import provider
import io
@provider(ILogObserver)
def hendrixObserver(path=DEFAULT_LOG_FILE, log_level=LogLevel.warn):
    """Create a log observer that appends JSON-encoded events to *path*.

    Events below *log_level* are filtered out.  The file handle is opened
    in append mode and stays open for the observer's lifetime.
    """
    file_sink = jsonFileLogObserver(io.open(path, 'a'))
    level_filter = LogLevelFilterPredicate(log_level)
    return FilteringLogObserver(file_sink, [level_filter])
globalLogPublisher.addObserver(hendrixObserver(log_level=LogLevel.debug))
| arabenjamin/hendrix | hendrix/logger.py | Python | mit | 590 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import proteindf_bridge as bridge
def main():
    """Parse a mmCIF file and optionally save it as a message pack file."""
    parser = argparse.ArgumentParser(description='parse mmCIF file')
    parser.add_argument('mmCIF_FILE', nargs=1, help="mmCIF file")
    parser.add_argument('-w', '--write', nargs=1,
                        help="save message pack file")
    parser.add_argument('-v', '--verbose', action="store_true",
                        default=False)
    args = parser.parse_args()

    mmcif_path = args.mmCIF_FILE[0]
    mpac_path = args.write[0] if args.write else ""
    verbose = args.verbose

    # load mmCIF file
    if verbose:
        print("reading: {}".format(mmcif_path))
    mmcif = bridge.SimpleMmcif(mmcif_path)

    if mpac_path:
        if verbose:
            print("write mpac file: {}".format(mpac_path))
        mmcif.save_msgpack(mpac_path)
# Script entry point.
if __name__ == '__main__':
    main()
| ProteinDF/ProteinDF_bridge | scripts/mmcif2txt.py | Python | gpl-3.0 | 1,123 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
LOG = utils.getLogger(__name__)
class CryptoPluginNotFound(exception.BarbicanException):
    """Raised when no plugins are installed."""
    # Translated message surfaced to API clients.
    message = u._("Crypto plugin not found.")
class CryptoKEKBindingException(exception.BarbicanException):
    """Raised when the bind_kek_metadata method from a plugin returns None."""
    def __init__(self, plugin_name=u._('Unknown')):
        message = u._('Failed to bind kek metadata for '
                      'plugin: {name}').format(name=plugin_name)
        super(CryptoKEKBindingException, self).__init__(message)
        # Keep the offending plugin name for callers to inspect.
        self.plugin_name = plugin_name
class CryptoPrivateKeyFailureException(exception.BarbicanException):
    """Raised when could not generate private key."""
    def __init__(self):
        # Fixed message: this failure carries no extra context.
        super(CryptoPrivateKeyFailureException, self).__init__(
            u._('Could not generate private key')
        )
# TODO(john-wood-w) Need to harmonize these lower-level constants with the
# higher level constants in secret_store.py.
class PluginSupportTypes(object):
    """Class to hold the type enumeration that plugins may support."""
    ENCRYPT_DECRYPT = "ENCRYPT_DECRYPT"
    SYMMETRIC_KEY_GENERATION = "SYMMETRIC_KEY_GENERATION"
    # A list of symmetric algorithms that are used to determine type of key gen
    SYMMETRIC_ALGORITHMS = ['aes', 'des', '3des', 'hmacsha1',
                            'hmacsha256', 'hmacsha384', 'hmacsha512']
    # Supported symmetric key lengths, in bits.
    SYMMETRIC_KEY_LENGTHS = [64, 128, 192, 256]
    ASYMMETRIC_KEY_GENERATION = "ASYMMETRIC_KEY_GENERATION"
    ASYMMETRIC_ALGORITHMS = ['rsa', 'dsa']
    # Supported asymmetric key lengths, in bits.
    ASYMMETRIC_KEY_LENGTHS = [1024, 2048, 4096]
class KEKMetaDTO(object):
    """Key Encryption Key (KEK) metadata DTO.

    A KEK represents the distinct key a crypto backend uses to encrypt a
    project's secrets.  Barbican hands a ``KEKMetaDTO`` to the backend so
    the plugin can persist per-project KEK information; Barbican stores
    the DTO's contents and supplies them again on every later request for
    the same project.

    .. attribute:: plugin_name
       Name Barbican uses to tie the metadata to a plugin.  Plugins must
       not change it.

    .. attribute:: kek_label
       Plugin-chosen label for the project's KEK; opaque to Barbican.

    .. attribute:: algorithm
       Encryption algorithm name (e.g. "AES", "3DES"); opaque to Barbican.

    .. attribute:: mode
       Algorithm mode (e.g. "CBC", "GCM"); opaque to Barbican.

    .. attribute:: bit_length
       KEK bit length as an integer; opaque to Barbican.

    .. attribute:: plugin_meta
       Free-form text for anything else the plugin needs to persist
       (HSM key IDs, service URIs, JSON blobs, ...).
    """

    def __init__(self, kek_datum):
        """Copy the persisted fields off a KEKDatum model row.

        Plugins should not have to create their own instance of this
        class; ``kek_datum`` is typically a
        barbican.model.models.KEKDatum instance.
        """
        for attr in ('kek_label', 'plugin_name', 'algorithm',
                     'bit_length', 'mode', 'plugin_meta'):
            setattr(self, attr, getattr(kek_datum, attr))
class GenerateDTO(object):
    """Secret generation DTO.

    Carries everything a plugin needs to generate a secret on behalf of
    a user.

    .. attribute:: algorithm
       Algorithm the secret is intended for, e.g. ``"AES"`` for a
       symmetric key or ``"RSA"`` for an asymmetric one.

    .. attribute:: bit_length
       Desired bit length of the secret, e.g. the key length for AES-CBC.

    .. attribute:: mode
       Algorithm mode the secret targets, e.g. ``"CBC"`` for ``"AES"``.

    .. attribute:: passphrase
       Optional passphrase for protecting a generated private key, or
       None when not supplied.
    """

    def __init__(self, algorithm, bit_length, mode, passphrase=None):
        self.algorithm = algorithm
        self.bit_length = bit_length
        self.mode = mode
        self.passphrase = passphrase
class ResponseDTO(object):
    """Result of a secret encryption/generation operation.

    Barbican persists both ``cypher_text`` and ``kek_meta_extended`` and
    hands them back to the plugin when decryption is later requested.

    ``kek_meta_extended`` extends per-project KEK metadata down to the
    per-secret level: a plugin that uses a unique KEK for every secret
    can record that KEK's ID here.

    :param cypher_text: byte data produced by encrypting the secret.
    :param kek_meta_extended: optional string persisted alongside the
        cyphertext.
    """

    def __init__(self, cypher_text, kek_meta_extended=None):
        self.cypher_text = cypher_text
        self.kek_meta_extended = kek_meta_extended
class DecryptDTO(object):
    """Secret decryption DTO.

    Currently carries only the data the plugin produced at encryption
    time; future revisions may add more (e.g. a transport key for
    wrapping the secret back to the client).

    .. attribute:: encrypted
       The plugin's encryption output.  For some plugins these are the
       actual bytes to decrypt; for others it is merely a reference into
       an external system that can produce the plaintext.
    """

    def __init__(self, encrypted):
        self.encrypted = encrypted
class EncryptDTO(object):
    """Secret encryption DTO.

    Currently carries only the raw bytes to encrypt; future revisions
    may add more information.

    .. attribute:: unencrypted
       The secret data, as bytes, to be encrypted by the plugin.
    """

    def __init__(self, unencrypted):
        self.unencrypted = unencrypted
@six.add_metaclass(abc.ABCMeta)
class CryptoPluginBase(object):
    """Abstract interface that every crypto plugin must implement.

    Barbican invokes these methods on an instance of the implementing
    class.  Barbican's plugin manager owns the life-cycle of the Data
    Transfer Objects (DTOs) passed in and persists whatever the plugin
    assigns to them.
    """

    @abc.abstractmethod
    def encrypt(self, encrypt_dto, kek_meta_dto, project_id):
        """Encrypt the secret bytes in *encrypt_dto* for a project.

        :param encrypt_dto: :class:`EncryptDTO` with the raw secret bytes.
        :param kek_meta_dto: :class:`KEKMetaDTO` describing the project's
            KEK.  Binding via :meth:`bind_kek_metadata` is guaranteed to
            have happened before this call.
        :param project_id: Project ID associated with the unencrypted data.
        :return: a response DTO with the cyphertext and KEK information.
        :rtype: :class:`ResponseDTO`
        """
        raise NotImplementedError  # pragma: no cover

    @abc.abstractmethod
    def decrypt(self, decrypt_dto, kek_meta_dto, kek_meta_extended,
                project_id):
        """Decrypt *decrypt_dto*'s cyphertext in the project's context.

        :param decrypt_dto: DTO holding the cyphertext to decrypt.
        :param kek_meta_dto: KEK metadata to use for decryption.
        :param kek_meta_extended: optional per-secret KEK metadata saved
            when the secret was encrypted.
        :param project_id: Project ID associated with the encrypted datum.
        :returns: str -- unencrypted byte data
        """
        raise NotImplementedError  # pragma: no cover

    @abc.abstractmethod
    def bind_kek_metadata(self, kek_meta_dto):
        """Bind KEK metadata to the encryption/decryption sub-system.

        Invoked before :meth:`encrypt`.  Implementors fill out the
        supplied DTO (its 'kek_label' is guaranteed unique and its
        'plugin_name' already configured) to completely describe the KEK
        metadata; Barbican persists the DTO's contents once this method
        returns.

        :param kek_meta_dto: Key encryption key metadata to bind.
        :returns: kek_meta_dto: the same DTO, after modifications.
        """
        raise NotImplementedError  # pragma: no cover

    @abc.abstractmethod
    def generate_symmetric(self, generate_dto, kek_meta_dto, project_id):
        """Generate a new symmetric key, returned encrypted.

        :param generate_dto: DTO carrying bit_length, algorithm and mode
            for the requested key.
        :param kek_meta_dto: KEK metadata to use for the encryption step.
        :param project_id: Project ID associated with the data.
        :returns: a :class:`ResponseDTO` holding the resultant cyphertext
            plus optional per-secret metadata needed to decrypt it (over
            and above the per-project metadata managed outside the
            plugins).
        """
        raise NotImplementedError  # pragma: no cover

    @abc.abstractmethod
    def generate_asymmetric(self, generate_dto, kek_meta_dto, project_id):
        """Generate a new asymmetric key pair, returned encrypted.

        :param generate_dto: DTO carrying bit_length, algorithm and
            passphrase for the requested key pair.
        :param kek_meta_dto: KEK metadata to use for the encryption step.
        :param project_id: Project ID associated with the data.
        :returns: a tuple of :class:`ResponseDTO` objects for the private
            key, the public key and, optionally, the passphrase.  Each
            holds cyphertext plus optional per-secret metadata needed to
            decrypt it.
        """
        raise NotImplementedError  # pragma: no cover

    @abc.abstractmethod
    def supports(self, type_enum, algorithm=None, bit_length=None, mode=None):
        """Report whether the plugin supports the requested operation.

        :param type_enum: a value from :class:`PluginSupportTypes`.
        :param algorithm: optional algorithm name to check.
        """
        raise NotImplementedError  # pragma: no cover
| jmvrbanac/barbican | barbican/plugin/crypto/crypto.py | Python | apache-2.0 | 14,375 |
from django import forms
import cekolabs_django_widgets
class TagField(forms.Field):
    """Form field that renders using the custom TagInput widget."""
    widget = cekolabs_django_widgets.widgets.TagInput
import datetime
import json
import mimetypes
import os
import re
import sys
import time
from email.header import Header
from http.client import responses
from urllib.parse import quote, urlparse
from django.conf import settings
from django.core import signals, signing
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.http.cookie import SimpleCookie
from django.utils import timezone
from django.utils.encoding import iri_to_uri
from django.utils.http import http_date
from django.utils.regex_helper import _lazy_re_compile
_charset_from_content_type_re = _lazy_re_compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I)
class BadHeaderError(ValueError):
    """Raised when a header value contains a newline, which would allow
    HTTP header-injection / response-splitting attacks."""
    pass
class HttpResponseBase:
    """
    An HTTP response base class with dictionary-accessed headers.

    This class doesn't handle content. It should not be used directly.
    Use the HttpResponse and StreamingHttpResponse subclasses instead.
    """

    status_code = 200

    def __init__(self, content_type=None, status=None, reason=None, charset=None):
        # _headers is a mapping of the lowercase name to the original case of
        # the header (required for working with legacy systems) and the header
        # value. Both the name of the header and its value are ASCII strings.
        self._headers = {}
        # Callables invoked by close() to release resources (e.g. open files
        # attached by FileResponse).
        self._resource_closers = []
        # This parameter is set by the handler. It's necessary to preserve the
        # historical behavior of request_finished.
        self._handler_class = None
        self.cookies = SimpleCookie()
        self.closed = False
        if status is not None:
            try:
                self.status_code = int(status)
            except (ValueError, TypeError):
                raise TypeError('HTTP status code must be an integer.')

            if not 100 <= self.status_code <= 599:
                raise ValueError('HTTP status code must be an integer from 100 to 599.')
        self._reason_phrase = reason
        self._charset = charset
        if content_type is None:
            content_type = 'text/html; charset=%s' % self.charset
        self['Content-Type'] = content_type

    @property
    def reason_phrase(self):
        if self._reason_phrase is not None:
            return self._reason_phrase
        # Leave self._reason_phrase unset in order to use the default
        # reason phrase for status code.
        return responses.get(self.status_code, 'Unknown Status Code')

    @reason_phrase.setter
    def reason_phrase(self, value):
        self._reason_phrase = value

    @property
    def charset(self):
        # Resolution order: explicit charset argument, then the charset
        # parameter of the Content-Type header, then settings.DEFAULT_CHARSET.
        if self._charset is not None:
            return self._charset
        content_type = self.get('Content-Type', '')
        matched = _charset_from_content_type_re.search(content_type)
        if matched:
            # Extract the charset and strip its double quotes
            return matched['charset'].replace('"', '')
        return settings.DEFAULT_CHARSET

    @charset.setter
    def charset(self, value):
        self._charset = value

    def serialize_headers(self):
        """HTTP headers as a bytestring."""
        def to_bytes(val, encoding):
            return val if isinstance(val, bytes) else val.encode(encoding)

        headers = [
            (to_bytes(key, 'ascii') + b': ' + to_bytes(value, 'latin-1'))
            for key, value in self._headers.values()
        ]
        return b'\r\n'.join(headers)

    __bytes__ = serialize_headers

    @property
    def _content_type_for_repr(self):
        return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else ''

    def _convert_to_charset(self, value, charset, mime_encode=False):
        """
        Convert headers key/value to ascii/latin-1 native strings.

        `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
        `value` can't be represented in the given charset, apply MIME-encoding.
        """
        if not isinstance(value, (bytes, str)):
            value = str(value)
        # Reject embedded newlines in either direction to block header
        # injection (see BadHeaderError).
        if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or
                isinstance(value, str) and ('\n' in value or '\r' in value)):
            raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
        try:
            if isinstance(value, str):
                # Ensure string is valid in given charset
                value.encode(charset)
            else:
                # Convert bytestring using given charset
                value = value.decode(charset)
        except UnicodeError as e:
            if mime_encode:
                value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode()
            else:
                e.reason += ', HTTP response headers must be in %s format' % charset
                raise
        return value

    def __setitem__(self, header, value):
        # Header names must be ASCII; values may be MIME-encoded if needed.
        header = self._convert_to_charset(header, 'ascii')
        value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
        self._headers[header.lower()] = (header, value)

    def __delitem__(self, header):
        self._headers.pop(header.lower(), False)

    def __getitem__(self, header):
        return self._headers[header.lower()][1]

    def has_header(self, header):
        """Case-insensitive check for a header."""
        return header.lower() in self._headers

    __contains__ = has_header

    def items(self):
        return self._headers.values()

    def get(self, header, alternate=None):
        return self._headers.get(header.lower(), (None, alternate))[1]

    def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
                   domain=None, secure=False, httponly=False, samesite=None):
        """
        Set a cookie.

        ``expires`` can be:
        - a string in the correct format,
        - a naive ``datetime.datetime`` object in UTC,
        - an aware ``datetime.datetime`` object in any time zone.
        If it is a ``datetime.datetime`` object then calculate ``max_age``.
        """
        self.cookies[key] = value
        if expires is not None:
            if isinstance(expires, datetime.datetime):
                if timezone.is_aware(expires):
                    expires = timezone.make_naive(expires, timezone.utc)
                # utcnow() is a classmethod, so this is
                # datetime.datetime.utcnow() regardless of the instance.
                delta = expires - expires.utcnow()
                # Add one second so the date matches exactly (a fraction of
                # time gets lost between converting to a timedelta and
                # then the date string).
                delta = delta + datetime.timedelta(seconds=1)
                # Just set max_age - the max_age logic will set expires.
                expires = None
                max_age = max(0, delta.days * 86400 + delta.seconds)
            else:
                self.cookies[key]['expires'] = expires
        else:
            self.cookies[key]['expires'] = ''
        if max_age is not None:
            self.cookies[key]['max-age'] = max_age
            # IE requires expires, so set it if hasn't been already.
            if not expires:
                self.cookies[key]['expires'] = http_date(time.time() + max_age)
        if path is not None:
            self.cookies[key]['path'] = path
        if domain is not None:
            self.cookies[key]['domain'] = domain
        if secure:
            self.cookies[key]['secure'] = True
        if httponly:
            self.cookies[key]['httponly'] = True
        if samesite:
            if samesite.lower() not in ('lax', 'none', 'strict'):
                raise ValueError('samesite must be "lax", "none", or "strict".')
            self.cookies[key]['samesite'] = samesite

    def setdefault(self, key, value):
        """Set a header unless it has already been set."""
        if key not in self:
            self[key] = value

    def set_signed_cookie(self, key, value, salt='', **kwargs):
        # Sign with a key derived from the cookie name plus the salt so a
        # value signed for one cookie can't be replayed under another name.
        value = signing.get_cookie_signer(salt=key + salt).sign(value)
        return self.set_cookie(key, value, **kwargs)

    def delete_cookie(self, key, path='/', domain=None):
        # Most browsers ignore the Set-Cookie header if the cookie name starts
        # with __Host- or __Secure- and the cookie doesn't use the secure flag.
        secure = key.startswith(('__Secure-', '__Host-'))
        # Deletion is done by re-setting the cookie as already expired.
        self.set_cookie(
            key, max_age=0, path=path, domain=domain, secure=secure,
            expires='Thu, 01 Jan 1970 00:00:00 GMT',
        )

    # Common methods used by subclasses

    def make_bytes(self, value):
        """Turn a value into a bytestring encoded in the output charset."""
        # Per PEP 3333, this response body must be bytes. To avoid returning
        # an instance of a subclass, this function returns `bytes(value)`.
        # This doesn't make a copy when `value` already contains bytes.

        # Handle string types -- we can't rely on force_bytes here because:
        # - Python attempts str conversion first
        # - when self._charset != 'utf-8' it re-encodes the content
        if isinstance(value, (bytes, memoryview)):
            return bytes(value)
        if isinstance(value, str):
            return bytes(value.encode(self.charset))
        # Handle non-string types.
        return str(value).encode(self.charset)

    # These methods partially implement the file-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    # The WSGI server must call this method upon completion of the request.
    # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
    def close(self):
        for closer in self._resource_closers:
            try:
                closer()
            except Exception:
                # Closing is best-effort; one failing closer must not
                # prevent the others from running.
                pass
        # Free resources that were still referenced.
        self._resource_closers.clear()
        self.closed = True
        signals.request_finished.send(sender=self._handler_class)

    def write(self, content):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)

    def flush(self):
        pass

    def tell(self):
        raise OSError('This %s instance cannot tell its position' % self.__class__.__name__)

    # These methods partially implement a stream-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    def readable(self):
        return False

    def seekable(self):
        return False

    def writable(self):
        return False

    def writelines(self, lines):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
    """
    An HTTP response holding its entire body in memory as bytes.

    The body can be read back, appended to via write(), or replaced
    wholesale through the ``content`` property.
    """

    streaming = False

    def __init__(self, content=b'', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The property setter below normalizes `content` into a list of
        # bytestrings (see `content` / `_container`).
        self.content = content

    def __repr__(self):
        return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
            'content_type': self._content_type_for_repr,
            'status_code': self.status_code,
            'cls': self.__class__.__name__,
        }

    def serialize(self):
        """Full HTTP message, including headers, as a bytestring."""
        return self.serialize_headers() + b'\r\n\r\n' + self.content

    __bytes__ = serialize

    @property
    def content(self):
        return b''.join(self._container)

    @content.setter
    def content(self, value):
        string_like = isinstance(value, (bytes, str))
        if hasattr(value, '__iter__') and not string_like:
            # Consume the iterable now so the response can be iterated
            # repeatedly, then close the source if it supports it.
            payload = b''.join(self.make_bytes(part) for part in value)
            if hasattr(value, 'close'):
                try:
                    value.close()
                except Exception:
                    pass
        else:
            payload = self.make_bytes(value)
        # Keep a list of bytestrings so write() can simply append.
        self._container = [payload]

    def __iter__(self):
        return iter(self._container)

    def write(self, content):
        self._container.append(self.make_bytes(content))

    def tell(self):
        return len(self.content)

    def getvalue(self):
        return self.content

    def writable(self):
        return True

    def writelines(self, lines):
        for line in lines:
            self.write(line)
class StreamingHttpResponse(HttpResponseBase):
    """
    A streaming HTTP response whose content is an iterator.

    Intended to be iterated exactly once, while the response is streamed
    to the client; before that, the iterator may be appended to or
    replaced with a new one that wraps (or supersedes) the original.
    """

    streaming = True

    def __init__(self, streaming_content=(), *args, **kwargs):
        super().__init__(*args, **kwargs)
        # `streaming_content` should be an iterable of bytestrings; the
        # property setter below wraps it.
        self.streaming_content = streaming_content

    @property
    def content(self):
        raise AttributeError(
            "This %s instance has no `content` attribute. Use "
            "`streaming_content` instead." % self.__class__.__name__
        )

    @property
    def streaming_content(self):
        # Lazily coerce each chunk to bytes as it is consumed.
        return (self.make_bytes(chunk) for chunk in self._iterator)

    @streaming_content.setter
    def streaming_content(self, value):
        self._set_streaming_content(value)

    def _set_streaming_content(self, value):
        # Ensure we can never iterate on "value" more than once.
        self._iterator = iter(value)
        closer = getattr(value, 'close', None)
        if closer is not None:
            self._resource_closers.append(closer)

    def __iter__(self):
        return self.streaming_content

    def getvalue(self):
        return b''.join(self.streaming_content)
class FileResponse(StreamingHttpResponse):
    """
    A streaming HTTP response class optimized for files.
    """
    # Number of bytes read from the file per streamed chunk.
    block_size = 4096

    def __init__(self, *args, as_attachment=False, filename='', **kwargs):
        # as_attachment selects the Content-Disposition type; filename
        # overrides the name derived from the file object, if any.
        self.as_attachment = as_attachment
        self.filename = filename
        super().__init__(*args, **kwargs)

    def _set_streaming_content(self, value):
        # Non-file content (no read()) falls through to the plain
        # streaming behavior of the parent class.
        if not hasattr(value, 'read'):
            self.file_to_stream = None
            return super()._set_streaming_content(value)

        self.file_to_stream = filelike = value
        if hasattr(filelike, 'close'):
            self._resource_closers.append(filelike.close)
        # Stream the file in fixed-size chunks until read() returns b''.
        value = iter(lambda: filelike.read(self.block_size), b'')
        self.set_headers(filelike)
        super()._set_streaming_content(value)

    def set_headers(self, filelike):
        """
        Set some common response headers (Content-Length, Content-Type, and
        Content-Disposition) based on the `filelike` response content.
        """
        encoding_map = {
            'bzip2': 'application/x-bzip',
            'gzip': 'application/gzip',
            'xz': 'application/x-xz',
        }
        # Prefer the file object's own name; fall back to the explicit
        # `filename` argument.
        filename = getattr(filelike, 'name', None)
        filename = filename if (isinstance(filename, str) and filename) else self.filename
        if os.path.isabs(filename):
            self['Content-Length'] = os.path.getsize(filelike.name)
        elif hasattr(filelike, 'getbuffer'):
            self['Content-Length'] = filelike.getbuffer().nbytes
        # 'text/html' is the default Content-Type set by
        # HttpResponseBase.__init__, so seeing it here means the caller
        # did not pass an explicit content_type; guess one from the
        # filename instead.
        if self.get('Content-Type', '').startswith('text/html'):
            if filename:
                content_type, encoding = mimetypes.guess_type(filename)
                # Encoding isn't set to prevent browsers from automatically
                # uncompressing files.
                content_type = encoding_map.get(encoding, content_type)
                self['Content-Type'] = content_type or 'application/octet-stream'
            else:
                self['Content-Type'] = 'application/octet-stream'

        filename = self.filename or os.path.basename(filename)
        if filename:
            disposition = 'attachment' if self.as_attachment else 'inline'
            try:
                filename.encode('ascii')
                file_expr = 'filename="{}"'.format(filename)
            except UnicodeEncodeError:
                # Non-ASCII filenames use the RFC 5987 extended form.
                file_expr = "filename*=utf-8''{}".format(quote(filename))
            self['Content-Disposition'] = '{}; {}'.format(disposition, file_expr)
        elif self.as_attachment:
            self['Content-Disposition'] = 'attachment'
class HttpResponseRedirectBase(HttpResponse):
    """Common base for redirect responses; validates the target scheme."""

    # URL schemes considered safe to redirect to.
    allowed_schemes = ['http', 'https', 'ftp']

    def __init__(self, redirect_to, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self['Location'] = iri_to_uri(redirect_to)
        scheme = urlparse(str(redirect_to)).scheme
        if scheme and scheme not in self.allowed_schemes:
            raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % scheme)

    url = property(lambda self: self['Location'])

    def __repr__(self):
        return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % {
            'url': self.url,
            'cls': self.__class__.__name__,
            'status_code': self.status_code,
            'content_type': self._content_type_for_repr,
        }
class HttpResponseRedirect(HttpResponseRedirectBase):
    # 302 Found: temporary redirect; clients keep using the original URL.
    status_code = 302


class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
    # 301 Moved Permanently: clients and caches may remember the new URL.
    status_code = 301
class HttpResponseNotModified(HttpResponse):
    """304 response; carries no entity body and no Content-Type header."""

    status_code = 304

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A 304 must not describe a body, so drop the default header.
        del self['content-type']

    @HttpResponse.content.setter
    def content(self, value):
        if not value:
            self._container = []
            return
        raise AttributeError("You cannot set content to a 304 (Not Modified) response")
class HttpResponseBadRequest(HttpResponse):
    # 400 Bad Request.
    status_code = 400


class HttpResponseNotFound(HttpResponse):
    # 404 Not Found.
    status_code = 404


class HttpResponseForbidden(HttpResponse):
    # 403 Forbidden.
    status_code = 403
class HttpResponseNotAllowed(HttpResponse):
    """405 response; advertises the permitted methods via ``Allow``."""

    status_code = 405

    def __init__(self, permitted_methods, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The Allow header is required on 405 responses.
        self['Allow'] = ', '.join(permitted_methods)

    def __repr__(self):
        return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % {
            'methods': self['Allow'],
            'cls': self.__class__.__name__,
            'status_code': self.status_code,
            'content_type': self._content_type_for_repr,
        }
class HttpResponseGone(HttpResponse):
    # 410 Gone.
    status_code = 410


class HttpResponseServerError(HttpResponse):
    # 500 Internal Server Error.
    status_code = 500


class Http404(Exception):
    # Raised by views to trigger the standard 404 handling machinery.
    pass
class JsonResponse(HttpResponse):
    """
    An HTTP response whose body is ``data`` serialized as JSON.

    :param data: Object to serialize.  Unless ``safe`` is False, only
        ``dict`` instances are accepted (guarding against an array
        hijacking flaw in pre-EcmaScript 5 browsers).
    :param encoder: JSON encoder class; defaults to
        ``django.core.serializers.json.DjangoJSONEncoder``.
    :param safe: When True (the default), reject non-dict payloads.
    :param json_dumps_params: Extra keyword arguments for json.dumps().
    """

    def __init__(self, data, encoder=DjangoJSONEncoder, safe=True,
                 json_dumps_params=None, **kwargs):
        if safe and not isinstance(data, dict):
            raise TypeError(
                'In order to allow non-dict objects to be serialized set the '
                'safe parameter to False.'
            )
        dumps_kwargs = {} if json_dumps_params is None else json_dumps_params
        kwargs.setdefault('content_type', 'application/json')
        body = json.dumps(data, cls=encoder, **dumps_kwargs)
        super().__init__(content=body, **kwargs)
| theo-l/django | django/http/response.py | Python | bsd-3-clause | 20,016 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class WebsiteRouteMeta(Document):
	def autoname(self):
		"""Strip a single leading slash so the route is stored relative."""
		name = self.name
		if name and name[0] == '/':
			self.name = name[1:]
| adityahase/frappe | frappe/website/doctype/website_route_meta/website_route_meta.py | Python | mit | 348 |
from django.utils.translation import ugettext as _
from cms.models import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import NewsItem
from .forms import CaptchaContactForm
class ContactFormPlugin(CMSPluginBase):
    """
    Render a captcha-protected contact form inside a CMS placeholder.
    """
    model = CMSPlugin
    name = "Contact Form"
    render_template = "contact_form_plugin.html"
    cache = False

    def render(self, context, instance, placeholder):
        # The form needs the current request (e.g. for captcha handling).
        form = CaptchaContactForm(request=context['request'])
        context.update({
            'instance': instance,
            'placeholder': placeholder,
            'form': form,
        })
        return context
class NewsList(CMSPluginBase):
    """Container plugin that renders its NewsItemPublisher children as a
    list of news entries."""
    render_template = 'news_list.html'
    name = _('News List')
    allow_children = True
    # Only news items may be nested inside this plugin.
    child_classes = ('NewsItemPublisher', )
    cache = False
class NewsItemPublisher(CMSPluginBase):
    """Plugin that renders a single NewsItem instance."""
    model = NewsItem
    name = _('News item')
    render_template = 'news_item.html'
    cache = False
# Register the plugins so they appear in the CMS plugin picker.
plugin_pool.register_plugin(ContactFormPlugin)
plugin_pool.register_plugin(NewsList)
plugin_pool.register_plugin(NewsItemPublisher)
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.http import request
# Selection values for priority fields: (stored value, UI label),
# ordered from lowest to highest urgency.
AVAILABLE_PRIORITIES = [
    ('0', 'Very Low'),
    ('1', 'Low'),
    ('2', 'Normal'),
    ('3', 'High'),
    ('4', 'Very High'),
]
class crm_tracking_medium(osv.Model):
    """Delivery channel of a marketing link (the UTM "medium"), e.g.
    email, banner ad or postcard.  Replaces the old crm.case.channel."""
    # OLD crm.case.channel
    _name = "crm.tracking.medium"
    _description = "Channels"
    _order = 'name'
    _columns = {
        'name': fields.char('Channel Name', required=True),
        'active': fields.boolean('Active'),
    }
    _defaults = {
        # New channels are active by default (1 is the truthy value
        # stored for the boolean column).
        'active': lambda *a: 1,
    }
class crm_tracking_campaign(osv.Model):
    """Marketing campaign (the UTM "campaign") used to label tracking
    links.  Replaces the old crm.case.resource.type."""
    # OLD crm.case.resource.type
    _name = "crm.tracking.campaign"
    _description = "Campaign"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Campaign Name', required=True, translate=True),
        # Sales team the campaign belongs to (optional).
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
class crm_tracking_source(osv.Model):
    """Origin of a tracked link (the UTM "source"), e.g. a search engine,
    another domain, or the name of an email list."""
    _name = "crm.tracking.source"
    _description = "Source"
    _rec_name = "name"
    _columns = {
        'name': fields.char('Source Name', required=True, translate=True),
    }
class crm_tracking_mixin(osv.AbstractModel):
    """Mixin class for objects which can be tracked by marketing. """
    _name = 'crm.tracking.mixin'
    _columns = {
        'campaign_id': fields.many2one('crm.tracking.campaign', 'Campaign', # old domain ="['|',('section_id','=',section_id),('section_id','=',False)]"
                                       help="This is a name that helps you keep track of your different campaign efforts Ex: Fall_Drive, Christmas_Special"),
        'source_id': fields.many2one('crm.tracking.source', 'Source', help="This is the source of the link Ex: Search Engine, another domain, or name of email list"),
        'medium_id': fields.many2one('crm.tracking.medium', 'Channel', help="This is the method of delivery. Ex: Postcard, Email, or Banner Ad"),
    }
    def tracking_fields(self):
        # (UTM request/session parameter name, local field name) pairs
        # linking tracking keys to the columns declared above.
        return [('utm_campaign', 'campaign_id'), ('utm_source', 'source_id'), ('utm_medium', 'medium_id')]
    def tracking_get_values(self, cr, uid, vals, context=None):
        # Fill each tracking field in `vals`: an explicit value wins,
        # otherwise fall back to the UTM value stashed in the HTTP
        # session by the ir_http dispatch.
        for key, field in self.tracking_fields():
            column = self._all_columns[field].column
            value = vals.get(field) or (request and request.session.get(key))  # params.get should be always in session by the dispatch from ir_http
            if column._type in ['many2one'] and isinstance(value, basestring):  # if we receive a string for a many2one, we search / create the id
                if value:
                    Model = self.pool[column._obj]
                    # NOTE(review): name_search may match more than one
                    # record; the first hit is used, and a new record is
                    # created only when there is no match at all.
                    rel_id = Model.name_search(cr, uid, value, context=context)
                    if rel_id:
                        rel_id = rel_id[0][0]
                    else:
                        rel_id = Model.create(cr, uid, {'name': value}, context=context)
                    vals[field] = rel_id
            # Here the code for others cases that many2one
            else:
                vals[field] = value
        return vals
    def _get_default_track(self, cr, uid, field, context=None):
        # Default getter: resolve a single tracking field from the
        # session (no explicit vals), used by _defaults below.
        return self.tracking_get_values(cr, uid, {}, context=context).get(field)
    _defaults = {
        'source_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'source_id', ctx),
        'campaign_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'campaign_id', ctx),
        'medium_id': lambda self, cr, uid, ctx: self._get_default_track(cr, uid, 'medium_id', ctx),
    }
class crm_case_stage(osv.osv):
    """ Model for case stages. This models the main stages of a document
        management flow. Main CRM objects (leads, opportunities, project
        issues, ...) will now use only stages, instead of state and stages.
        Stages are for example used to display the kanban view of records.
    """
    _name = "crm.case.stage"
    _description = "Stage of case"
    _rec_name = 'name'
    _order = "sequence"
    _columns = {
        'name': fields.char('Stage Name', required=True, translate=True),
        'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
        'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
        'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
        'requirements': fields.text('Requirements'),
        'section_ids': fields.many2many('crm.case.section', 'section_stage_rel', 'stage_id', 'section_id', string='Sections',
                        help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
        'case_default': fields.boolean('Default to New Sales Team',
                        help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
        # Bug fix: the two help-text fragments were concatenated without a
        # separating space, producing "...view whenthere are no records...".
        'fold': fields.boolean('Folded in Kanban View',
                               help='This stage is folded in the kanban view when '
                                    'there are no records in that stage to display.'),
        'type': fields.selection([('lead', 'Lead'), ('opportunity', 'Opportunity'), ('both', 'Both')],
                                 string='Type', required=True,
                                 help="This field is used to distinguish stages related to Leads from stages related to Opportunities, or to specify stages available for both types."),
    }
    _defaults = {
        'sequence': 1,
        'probability': 0.0,
        'on_change': True,
        'fold': False,
        'type': 'both',
        'case_default': True,
    }
class crm_case_categ(osv.osv):
    """ Category of Case """
    _name = "crm.case.categ"
    _description = "Category of Case"
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        # The model this category applies to (resolved from context).
        'object_id': fields.many2one('ir.model', 'Object Name'),
    }
    def _find_object_id(self, cr, uid, context=None):
        """Finds id for case object"""
        context = context or {}
        object_id = context.get('object_id', False)
        # Match either an explicit ir.model id ('object_id' in context) or a
        # model name ('object_name' in context); first hit wins.
        ids = self.pool.get('ir.model').search(cr, uid, ['|', ('id', '=', object_id), ('model', '=', context.get('object_name', False))])
        return ids and ids[0] or False
    _defaults = {
        'object_id': _find_object_id
    }
class crm_payment_mode(osv.osv):
    """ Payment Mode for Fund """
    _name = "crm.payment.mode"
    _description = "CRM Payment Mode"
    _columns = {
        'name': fields.char('Name', required=True),
        # Sales team this payment mode belongs to (optional).
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| demon-ru/iml-crm | addons/crm/crm.py | Python | agpl-3.0 | 7,926 |
#!/usr/bin/env python2.7
from __future__ import absolute_import
import argparse
import json
import logging
import sys
import os
from . import gclserver
from . import lps
# LSP TextDocumentSyncKind value meaning the client re-sends the full
# document text on every change (as opposed to incremental deltas).
TextDocumentSyncKind_Full = 1
logger = logging.getLogger('gcls')
def main():
    """Entry point: parse CLI options and run the GCL language server.

    Protocol messages are read from stdin (or ``--file`` for testing) and
    responses written to stdout; all logging goes to stderr so it cannot
    corrupt the protocol stream. Exits with status 1 on any unhandled error.
    """
    parser = argparse.ArgumentParser(description='GCL Language Protocol Server')
    parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Show debug output')
    parser.add_argument('--file', '-f', help='Read input from file instead of stdin (testing only)')
    parser.add_argument('--include', '-i', action='append', default=[], help='GCL search directories (in addition to what\'s in GCLPATH)')
    args = parser.parse_args()
    loglevel = logging.DEBUG if args.verbose else logging.ERROR
    logging.basicConfig(format='%(asctime)-15s [%(levelname)s] %(message)s',
                        stream=sys.stderr,
                        level=loglevel)
    try:
        logger.info('Current directory is %s', os.getcwd())
        search_directories = args.include
        if 'GCLPATH' in os.environ:
            search_directories.extend(os.environ['GCLPATH'].split(':'))
        logger.info('Search path is %r', search_directories)
        gcl_server = gclserver.GCLServer(search_directories)
        handler = GCLProtocolHandler(gcl_server)
        if args.file:
            input_stream = open(args.file, 'rb')
        else:
            # Unbuffered stdin so protocol messages are seen promptly.
            input_stream = os.fdopen(sys.stdin.fileno(), 'rb', 0)
        proto_server = lps.LanguageProtocolServer(handler, input_stream, sys.stdout)
        proto_server.run()
    except Exception:
        # Fix: the exception was bound to an unused name ``e``;
        # logger.exception() already records the active traceback.
        logger.exception('Uncaught error')
        sys.exit(1)
class GCLProtocolHandler(lps.LanguageProtocolHandler):
    """Bridge between the Language Protocol and the GCL Server."""
    def __init__(self, gcl_server):
        # The GCL analysis backend that parses documents and answers queries.
        self.gcl_server = gcl_server
    def updateDocument(self, uri, text, diagnostic_publisher):
        """Re-parse ``text`` for ``uri``; publish parse errors as diagnostics."""
        def report_parse_error(uri, line, col, messages):
            # Report a multi-line error message, all at the given location.
            # GCL positions are 1-based, LSP positions 0-based, hence the -1s.
            rng = lps.Range(
                lps.Position(line - 1, col - 1),
                lps.Position(line - 1, col + 2))  # Length is always 3
            diagnostic_publisher(uri, [lps.Diagnostic(
                range=rng,
                severity=lps.DiagnosticSeverity.Error,
                source='gcls',
                message=m) for m in messages])
        return self.gcl_server.update_document(uri, text, report_parse_error)
    def getHoverInfo(self, uri, line, char):
        """Return hover markup for the symbol at (line, char); empty if none."""
        value = self.gcl_server.hover_info(uri, line, char)
        return lps.HoverInfo(language='gcl', value=value or '')
    def getCompletions(self, uri, line, char):
        """Return the LSP completions available at (line, char) in ``uri``."""
        completion_map = self.gcl_server.completions(uri, line, char)
        return map(mkCompletion, completion_map.values())
def mkCompletion(c):
    """Translate one GCL server completion record into an lps.Completion."""
    if c.builtin:
        kind = lps.CompletionKind.Field
        detail = 'built-in'
    else:
        kind = lps.CompletionKind.Text
        detail = ''
    return lps.Completion(label=c.name,
                          kind=kind,
                          detail=detail,
                          documentation=c.doc)
# Script entry point when executed directly (not imported).
if __name__ == '__main__':
    main()
| rix0rrr/gcl-language-server | gcl_language_server/main.py | Python | mit | 3,186 |
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import binascii
import os
import re
import StringIO
from boto.exception import BotoClientError
from boto.s3.key import Key as S3Key
from boto.s3.keyfile import KeyFile
from boto.utils import compute_hash
from boto.utils import get_utf8_value
class Key(S3Key):
"""
Represents a key (object) in a GS bucket.
:ivar bucket: The parent :class:`boto.gs.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in GS.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | DURABLE_REDUCED_AVAILABILITY.
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar generation: The generation number of the object.
:ivar metageneration: The generation number of the object metadata.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
:ivar cloud_hashes: Dictionary of checksums as supplied by the storage
provider.
"""
def __init__(self, bucket=None, name=None, generation=None):
super(Key, self).__init__(bucket=bucket, name=name)
self.generation = generation
self.meta_generation = None
self.cloud_hashes = {}
self.component_count = None
def __repr__(self):
if self.generation and self.metageneration:
ver_str = '#%s.%s' % (self.generation, self.metageneration)
else:
ver_str = ''
if self.bucket:
return '<Key: %s,%s%s>' % (self.bucket.name, self.name, ver_str)
else:
return '<Key: None,%s%s>' % (self.name, ver_str)
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
elif name == 'Generation':
self.generation = value
elif name == 'MetaGeneration':
self.metageneration = value
else:
setattr(self, name, value)
    def handle_version_headers(self, resp, force=False):
        """Record GCS versioning info from the response headers.

        ``force`` is accepted for signature compatibility with the S3 parent
        class and is not used here.
        """
        self.metageneration = resp.getheader('x-goog-metageneration', None)
        self.generation = resp.getheader('x-goog-generation', None)
    def handle_restore_headers(self, response):
        # Intentional no-op override: the S3 restore (Glacier) headers have
        # no GCS equivalent, so the parent behavior is disabled.
        return
def handle_addl_headers(self, headers):
for key, value in headers:
if key == 'x-goog-hash':
for hash_pair in value.split(','):
alg, b64_digest = hash_pair.strip().split('=', 1)
self.cloud_hashes[alg] = binascii.a2b_base64(b64_digest)
elif key == 'x-goog-component-count':
self.component_count = int(value)
elif key == 'x-goog-generation':
self.generation = value
# Use x-goog-stored-content-encoding and
# x-goog-stored-content-length to indicate original content length
# and encoding, which are transcoding-invariant (so are preferable
# over using content-encoding and size headers).
elif key == 'x-goog-stored-content-encoding':
self.content_encoding = value
elif key == 'x-goog-stored-content-length':
self.size = int(value)
    def open_read(self, headers=None, query_args='',
                  override_num_retries=None, response_headers=None):
        """
        Open this key for reading
        :type headers: dict
        :param headers: Headers to pass in the web request
        :type query_args: string
        :param query_args: Arguments to pass in the query string
            (ie, 'torrent')
        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response. See
            http://goo.gl/EWOPb for details.
        """
        # For GCS we need to include the object generation in the query args.
        # The rest of the processing is handled in the parent class.
        if self.generation:
            if query_args:
                # Keep any caller-supplied arguments before appending ours.
                query_args += '&'
            query_args += 'generation=%s' % self.generation
        super(Key, self).open_read(headers=headers, query_args=query_args,
                                   override_num_retries=override_num_retries,
                                   response_headers=response_headers)
    def get_file(self, fp, headers=None, cb=None, num_cb=10,
                 torrent=False, version_id=None, override_num_retries=None,
                 response_headers=None, hash_algs=None):
        """Download this key's contents into ``fp``.

        Forwards the GCS ``generation`` (when set) as a query argument so a
        specific object generation is fetched. ``torrent`` and ``version_id``
        are accepted for signature compatibility with the S3 parent class and
        are not forwarded to the internal download here.
        """
        query_args = None
        if self.generation:
            # NOTE: a list here (vs. the '&'-joined string built in
            # open_read); _get_file_internal accepts a list of query args.
            query_args = ['generation=%s' % self.generation]
        self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                                override_num_retries=override_num_retries,
                                response_headers=response_headers,
                                hash_algs=hash_algs,
                                query_args=query_args)
    def get_contents_to_file(self, fp, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None,
                             hash_algs=None):
        """
        Retrieve an object from GCS using the name of the Key object as the
        key in GCS. Write the contents of the object to the file pointed
        to by 'fp'.
        :type fp: File -like object
        :param fp: the file to write the object's contents to.
        :type headers: dict
        :param headers: additional HTTP headers that will be sent with
            the GET request.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to GCS and
            the second representing the size of the to be transmitted
            object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent
            file as a string.
        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
            perform the download.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response. See
            http://goo.gl/sMkcC for details.
        """
        if self.bucket is not None:
            if res_download_handler:
                # NOTE(review): response_headers is not forwarded on the
                # resumable path — presumably the handler does not accept it;
                # confirm against ResumableDownloadHandler.get_file.
                res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                              torrent=torrent,
                                              version_id=version_id,
                                              hash_algs=hash_algs)
            else:
                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              response_headers=response_headers,
                              hash_algs=hash_algs)
    def compute_hash(self, fp, algorithm, size=None):
        """
        :type fp: file
        :param fp: File pointer to the file to hash. The file
            pointer will be reset to the same position before the
            method returns.
        :type algorithm: zero-argument constructor for hash objects that
            implements update() and digest() (e.g. hashlib.md5)
        :type size: int
        :param size: (optional) The Maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where the file is being split
            in place into different parts. Less bytes may be available.
        :rtype: tuple
        :return: (hex digest, base64-encoded digest) of the bytes read.
            As a side effect, ``self.size`` is set to the number of bytes
            hashed.
        """
        hex_digest, b64_digest, data_size = compute_hash(
            fp, size=size, hash_algorithm=algorithm)
        # The internal implementation of compute_hash() needs to return the
        # data size, but we don't want to return that value to the external
        # caller because it changes the class interface (i.e. it might
        # break some code), so we consume the third tuple value here and
        # return the remainder of the tuple to the caller, thereby preserving
        # the existing interface.
        self.size = data_size
        return (hex_digest, b64_digest)
    def send_file(self, fp, headers=None, cb=None, num_cb=10,
                  query_args=None, chunked_transfer=False, size=None,
                  hash_algs=None):
        """
        Upload a file to GCS.
        :type fp: file
        :param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
            ie. if uploading the full file, it should point at the
            start of the file. Normally when a file is opened for
            reading, the fp will point at the first byte. See the
            bytes parameter below for more info.
        :type headers: dict
        :param headers: The headers to pass along with the PUT request
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file
            transfer. Providing a negative integer will cause your
            callback to be called with each buffer read.
        :type query_args: string
        :param query_args: Arguments to pass in the query string.
        :type chunked_transfer: boolean
        :param chunked_transfer: (optional) If true, we use chunked
            Transfer-Encoding.
        :type size: int
        :param size: (optional) The Maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Less bytes may be available.
        :type hash_algs: dictionary
        :param hash_algs: (optional) Dictionary of hash algorithms and
            corresponding hashing class that implements update() and digest().
            Defaults to {'md5': hashlib.md5}.
        """
        # All real work happens in the shared internal helper; this override
        # exists so GCS callers can supply hash_algs.
        self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
                                 query_args=query_args,
                                 chunked_transfer=chunked_transfer, size=size,
                                 hash_algs=hash_algs)
def delete(self, headers=None):
return self.bucket.delete_key(self.name, version_id=self.version_id,
generation=self.generation,
headers=headers)
def add_email_grant(self, permission, email_address):
"""
Convenience method that provides a quick way to add an email grant to a
key. This method retrieves the current ACL, creates a new grant based on
the parameters passed in, adds that grant to the ACL and then PUT's the
new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type email_address: string
:param email_address: The email address associated with the Google
account to which you are granting the permission.
"""
acl = self.get_acl()
acl.add_email_grant(permission, email_address)
self.set_acl(acl)
def add_user_grant(self, permission, user_id):
"""
Convenience method that provides a quick way to add a canonical user
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type user_id: string
:param user_id: The canonical user id associated with the GS account to
which you are granting the permission.
"""
acl = self.get_acl()
acl.add_user_grant(permission, user_id)
self.set_acl(acl)
    def add_group_email_grant(self, permission, email_address, headers=None):
        """
        Convenience method that provides a quick way to add an email group
        grant to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL and
        then PUT's the new ACL back to GS.
        :type permission: string
        :param permission: The permission being granted. Should be one of:
            READ|FULL_CONTROL
            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.
        :type email_address: string
        :param email_address: The email address associated with the Google
            Group to which you are granting the permission.
        :type headers: dict
        :param headers: (optional) additional HTTP headers sent with both the
            ACL GET and the ACL PUT request.
        """
        acl = self.get_acl(headers=headers)
        acl.add_group_email_grant(permission, email_address)
        self.set_acl(acl, headers=headers)
def add_group_grant(self, permission, group_id):
"""
Convenience method that provides a quick way to add a canonical group
grant to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL and
then PUT's the new ACL back to GS.
:type permission: string
:param permission: The permission being granted. Should be one of:
READ|FULL_CONTROL
See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
for more details on permissions.
:type group_id: string
:param group_id: The canonical group id associated with the Google
Groups account you are granting the permission to.
"""
acl = self.get_acl()
acl.add_group_grant(permission, group_id)
self.set_acl(acl)
    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               res_upload_handler=None, size=None, rewind=False,
                               if_generation=None):
        """
        Store an object in GS using the name of the Key object as the
        key in GS and the contents of the file pointed to by 'fp' as the
        contents.
        :type fp: file
        :param fp: the file whose contents are to be uploaded
        :type headers: dict
        :param headers: additional HTTP headers to be sent with the PUT request.
        :type replace: bool
        :param replace: If this parameter is False, the method will first check
            to see if an object exists in the bucket with the same key. If it
            does, it won't overwrite it. The default value is True which will
            overwrite the object.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing the
            total number of bytes that need to be transmitted.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter, this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be called
            during the file transfer.
        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.
        :type md5: A tuple containing the hexdigest version of the MD5 checksum
            of the file as the first element and the Base64-encoded version of
            the plain checksum as the second element. This is the same format
            returned by the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to
            upload, it's silly to have to do it twice so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the checksum
            will be computed.
        :type res_upload_handler: ResumableUploadHandler
        :param res_upload_handler: If provided, this handler will perform the
            upload.
        :type size: int
        :param size: (optional) The Maximum number of bytes to read from
            the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the
            file up into different ranges to be uploaded. If not
            specified, the default behaviour is to read all bytes
            from the file pointer. Less bytes may be available.
            Notes:
                1. The "size" parameter currently cannot be used when
                   a resumable upload handler is given but is still
                   useful for uploading part of a file as implemented
                   by the parent class.
                2. At present Google Cloud Storage does not support
                   multipart uploads.
        :type rewind: bool
        :param rewind: (optional) If True, the file pointer (fp) will be
            rewound to the start before any bytes are read from
            it. The default behaviour is False which reads from
            the current position of the file pointer (fp).
        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the
            object will only be written to if its current generation number is
            this value. If set to the value 0, the object will only be written
            if it doesn't already exist.
        :rtype: int
        :return: The number of bytes written to the key.
        TODO: At some point we should refactor the Bucket and Key classes,
        to move functionality common to all providers into a parent class,
        and provider-specific functionality into subclasses (rather than
        just overriding/sharing code the way it currently works).
        """
        provider = self.bucket.connection.provider
        if res_upload_handler and size:
            # could use size instead of file_length if provided but...
            raise BotoClientError(
                '"size" param not supported for resumable uploads.')
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if rewind:
            # caller requests reading from beginning of fp.
            fp.seek(0, os.SEEK_SET)
        else:
            # The following seek/tell/seek logic is intended
            # to detect applications using the older interface to
            # set_contents_from_file(), which automatically rewound the
            # file each time the Key was reused. This changed with commit
            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
            # split into multiple parts and uploaded in parallel, and at
            # the time of that commit this check was added because otherwise
            # older programs would get a success status and upload an empty
            # object. Unfortuantely, it's very inefficient for fp's implemented
            # by KeyFile (used, for example, by gsutil when copying between
            # providers). So, we skip the check for the KeyFile case.
            # TODO: At some point consider removing this seek/tell/seek
            # logic, after enough time has passed that it's unlikely any
            # programs remain that assume the older auto-rewind interface.
            if not isinstance(fp, KeyFile):
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                if fp.tell() == spos:
                    fp.seek(0, os.SEEK_SET)
                    if fp.tell() != spos:
                        # Raise an exception as this is likely a programming
                        # error whereby there is data before the fp but nothing
                        # after it.
                        fp.seek(spos)
                        raise AttributeError('fp is at EOF. Use rewind option '
                                             'or seek() to data start.')
                # seek back to the correct position.
                fp.seek(spos)
        if hasattr(fp, 'name'):
            self.path = fp.name
        if self.bucket is not None:
            if isinstance(fp, KeyFile):
                # Avoid EOF seek for KeyFile case as it's very inefficient.
                key = fp.getkey()
                size = key.size - fp.tell()
                self.size = size
                # At present both GCS and S3 use MD5 for the etag for
                # non-multipart-uploaded objects. If the etag is 32 hex
                # chars use it as an MD5, to avoid having to read the file
                # twice while transferring.
                if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                    etag = key.etag.strip('"')
                    md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
            if size:
                self.size = size
            else:
                # If md5 is provided, still need to size so
                # calculate based on bytes to end of content
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                self.size = fp.tell() - spos
                fp.seek(spos)
                size = self.size
            if md5 is None:
                md5 = self.compute_md5(fp, size)
            self.md5 = md5[0]
            self.base64md5 = md5[1]
            if self.name is None:
                # Content-addressed fallback: name the object by its MD5.
                self.name = self.md5
            if not replace:
                if self.bucket.lookup(self.name):
                    return
            if if_generation is not None:
                # Precondition header: write only if the live object has this
                # generation (0 means "only if it doesn't exist").
                headers['x-goog-if-generation-match'] = str(if_generation)
            if res_upload_handler:
                res_upload_handler.send_file(self, fp, headers, cb, num_cb)
            else:
                # Not a resumable transfer so use basic send_file mechanism.
                self.send_file(fp, headers, cb, num_cb, size=size)
    def set_contents_from_filename(self, filename, headers=None, replace=True,
                                   cb=None, num_cb=10, policy=None, md5=None,
                                   reduced_redundancy=None,
                                   res_upload_handler=None,
                                   if_generation=None):
        """
        Store an object in GS using the name of the Key object as the
        key in GS and the contents of the file named by 'filename'.
        See set_contents_from_file method for details about the
        parameters.
        :type filename: string
        :param filename: The name of the file that you want to put onto GS
        :type headers: dict
        :param headers: Additional headers to pass along with the request to GS.
        :type replace: bool
        :param replace: If True, replaces the contents of the file if it
            already exists.
        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from GS and the second representing
            the total number of bytes that need to be transmitted.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be called
            during the file transfer.
        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.
        :type md5: A tuple containing the hexdigest version of the MD5 checksum
            of the file as the first element and the Base64-encoded version of
            the plain checksum as the second element. This is the same format
            returned by the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to
            upload, it's silly to have to do it twice so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the checksum
            will be computed.
        :type reduced_redundancy: bool
        :param reduced_redundancy: not used by this class; accepted for
            interface compatibility and ignored (it is never referenced in
            the body — presumably an S3 carry-over; confirm before removing).
        :type res_upload_handler: ResumableUploadHandler
        :param res_upload_handler: If provided, this handler will perform the
            upload.
        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the
            object will only be written to if its current generation number is
            this value. If set to the value 0, the object will only be written
            if it doesn't already exist.
        """
        # Clear out any previously computed hashes, since we are setting the
        # content.
        self.local_hashes = {}
        with open(filename, 'rb') as fp:
            self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                        policy, md5, res_upload_handler,
                                        if_generation=if_generation)
    def set_contents_from_string(self, s, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None, md5=None,
                                 if_generation=None):
        """
        Store an object in GCS using the name of the Key object as the
        key in GCS and the string 's' as the contents.
        See set_contents_from_file method for details about the
        parameters.
        :type headers: dict
        :param headers: Additional headers to pass along with the
            request to AWS.
        :type replace: bool
        :param replace: If True, replaces the contents of the file if
            it already exists.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept
            two integer parameters, the first representing the
            number of bytes that have been successfully
            transmitted to GCS and the second representing the
            size of the to be transmitted object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with
            the cb parameter this parameter determines the
            granularity of the callback by defining
            the maximum number of times the callback will
            be called during the file transfer.
        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in GCS.
        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the
            second element. This is the same format returned by
            the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior
            to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values
            of the file. Otherwise, the checksum will be computed.
        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the
            object will only be written to if its current generation number is
            this value. If set to the value 0, the object will only be written
            if it doesn't already exist.
        """
        # Clear out any previously computed md5 hashes, since we are setting the content.
        self.md5 = None
        self.base64md5 = None
        # get_utf8_value encodes unicode input so the in-memory file holds bytes.
        fp = StringIO.StringIO(get_utf8_value(s))
        r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                        policy, md5,
                                        if_generation=if_generation)
        fp.close()
        return r
def set_contents_from_stream(self, *args, **kwargs):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Size and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the
object will only be written to if its current generation number is
this value. If set to the value 0, the object will only be written
if it doesn't already exist.
"""
if_generation = kwargs.pop('if_generation', None)
if if_generation is not None:
headers = kwargs.get('headers', {})
headers['x-goog-if-generation-match'] = str(if_generation)
kwargs['headers'] = headers
super(Key, self).set_contents_from_stream(*args, **kwargs)
def set_acl(self, acl_or_str, headers=None, generation=None,
if_generation=None, if_metageneration=None):
"""Sets the ACL for this object.
:type acl_or_str: string or :class:`boto.gs.acl.ACL`
:param acl_or_str: A canned ACL string (see
:data:`~.gs.acl.CannedACLStrings`) or an ACL object.
:type headers: dict
:param headers: Additional headers to set during the request.
:type generation: int
:param generation: If specified, sets the ACL for a specific generation
of a versioned object. If not specified, the current version is
modified.
:type if_generation: int
:param if_generation: (optional) If set to a generation number, the acl
will only be updated if its current generation number is this value.
:type if_metageneration: int
:param if_metageneration: (optional) If set to a metageneration number,
the acl will only be updated if its current metageneration number is
this value.
"""
if self.bucket is not None:
self.bucket.set_acl(acl_or_str, self.name, headers=headers,
generation=generation,
if_generation=if_generation,
if_metageneration=if_metageneration)
def get_acl(self, headers=None, generation=None):
"""Returns the ACL of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: :class:`.gs.acl.ACL`
"""
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers,
generation=generation)
def get_xml_acl(self, headers=None, generation=None):
"""Returns the ACL string of this object.
:param dict headers: Additional headers to set during the request.
:param int generation: If specified, gets the ACL for a specific
generation of a versioned object. If not specified, the current
version is returned.
:rtype: str
"""
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers,
generation=generation)
    def set_xml_acl(self, acl_str, headers=None, generation=None,
                    if_generation=None, if_metageneration=None):
        """Sets this objects's ACL to an XML string.
        :type acl_str: string
        :param acl_str: A string containing the ACL XML.
        :type headers: dict
        :param headers: Additional headers to set during the request.
        :type generation: int
        :param generation: If specified, sets the ACL for a specific generation
            of a versioned object. If not specified, the current version is
            modified.
        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the acl
            will only be updated if its current generation number is this value.
        :type if_metageneration: int
        :param if_metageneration: (optional) If set to a metageneration number,
            the acl will only be updated if its current metageneration number is
            this value.
        """
        # Delegate to the bucket, which knows how to build the request.
        # A key not bound to a bucket silently no-ops and returns None.
        if self.bucket is not None:
            return self.bucket.set_xml_acl(acl_str, self.name, headers=headers,
                                           generation=generation,
                                           if_generation=if_generation,
                                           if_metageneration=if_metageneration)
    def set_canned_acl(self, acl_str, headers=None, generation=None,
                       if_generation=None, if_metageneration=None):
        """Sets this objects's ACL using a predefined (canned) value.
        :type acl_str: string
        :param acl_str: A canned ACL string. See
            :data:`~.gs.acl.CannedACLStrings`.
        :type headers: dict
        :param headers: Additional headers to set during the request.
        :type generation: int
        :param generation: If specified, sets the ACL for a specific generation
            of a versioned object. If not specified, the current version is
            modified.
        :type if_generation: int
        :param if_generation: (optional) If set to a generation number, the acl
            will only be updated if its current generation number is this value.
        :type if_metageneration: int
        :param if_metageneration: (optional) If set to a metageneration number,
            the acl will only be updated if its current metageneration number is
            this value.
        """
        # Delegate to the bucket; a key not bound to a bucket silently
        # no-ops and returns None.
        if self.bucket is not None:
            return self.bucket.set_canned_acl(
                acl_str,
                self.name,
                headers=headers,
                generation=generation,
                if_generation=if_generation,
                if_metageneration=if_metageneration
            )
    def compose(self, components, content_type=None, headers=None):
        """Create a new object from a sequence of existing objects.
        The content of the object representing this Key will be the
        concatenation of the given object sequence. For more detail, visit
        https://developers.google.com/storage/docs/composite-objects
        :type components: list
        :param components: List of gs.Keys representing the component objects
        :type content_type: string
        :param content_type: (optional) Content type for the new composite
            object.
        :type headers: dict
        :param headers: (optional) Additional headers to send with the request.
        :rtype: string
        :return: The generation number of the newly composed object.
        """
        compose_req = []
        for key in components:
            # GCS only supports composing objects that live in one bucket.
            if key.bucket.name != self.bucket.name:
                raise BotoClientError(
                    'GCS does not support inter-bucket composing')
            generation_tag = ''
            if key.generation:
                # Pin the exact generation of the component, when known, so
                # a concurrent overwrite cannot change what gets composed.
                generation_tag = ('<Generation>%s</Generation>'
                                  % str(key.generation))
            compose_req.append('<Component><Name>%s</Name>%s</Component>' %
                               (key.name, generation_tag))
        compose_req_xml = ('<ComposeRequest>%s</ComposeRequest>' %
                           ''.join(compose_req))
        headers = headers or {}
        if content_type:
            headers['Content-Type'] = content_type
        # Issue the compose call: PUT <bucket>/<this key>?compose with the
        # XML body listing the components.
        resp = self.bucket.connection.make_request(
            'PUT', get_utf8_value(self.bucket.name), get_utf8_value(self.name),
            headers=headers, query_args='compose',
            data=get_utf8_value(compose_req_xml))
        # Anything outside the 2xx range is an error response from GCS.
        if resp.status < 200 or resp.status > 299:
            raise self.bucket.connection.provider.storage_response_error(
                resp.status, resp.reason, resp.read())
        # Return the generation so that the result URI can be built with this
        # for automatic parallel uploads.
        return resp.getheader('x-goog-generation')
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/gs/key.py | Python | gpl-3.0 | 42,479 |
from flask import Flask
from flask import request
import sys
sys.path.append('../../')
from ..ResourceBase import *
from models import IncentivadorModelObject
from ..serialization import listify_queryset
from ..format_utils import remove_blanks, cgccpf_mask
from ..security import encrypt, decrypt
import pymssql, json
class IncentivadorDetail(ResourceBase):
    """REST resource returning the details of a single donator
    ("incentivador"), looked up by its encrypted CGC/CPF identifier.

    NOTE(review): ``app`` and ``Log`` are not imported explicitly in this
    module; presumably they come in via the ``from ..ResourceBase import *``
    star import -- verify.
    """
    def build_links(self, args = {}):
        # Append this resource's id to the "self" link and derive the
        # related "doacoes" (donations) link from it.
        # NOTE(review): mutable default argument, and self.links['self'] is
        # mutated in place, so repeated calls on one instance keep
        # appending -- confirm each request gets a fresh instance.
        incentivador_id = args['incentivador_id']
        self.links['self'] += incentivador_id
        self.links['doacoes'] = self.links['self'] + '/doacoes'
    def __init__(self):
        # Raw database person-type codes mapped to readable labels.
        # (Not referenced within this class; presumably used elsewhere.)
        self.tipos_pessoa = {'1' : 'fisica', '2' : 'juridica'}
        super (IncentivadorDetail,self).__init__()
        self.links = {
            "self" : app.config['API_ROOT_URL']+'incentivadores/'
        }
        # HAL serializer: attaches this resource's links to the payload.
        # Defined as a closure so it reads self.links at call time.
        def hal_builder(data, args = {}):
            hal_data = data
            hal_data['_links'] = self.links
            return hal_data
        self.to_hal = hal_builder
    def get(self, incentivador_id):
        """Handle GET for one donator.

        ``incentivador_id`` is an encrypted CGC/CPF.  Renders 503 on a
        database error, 404 when no donator matches, and the donator's
        data otherwise.
        """
        cgccpf = decrypt(incentivador_id)
        try:
            results, n_records = IncentivadorModelObject().all(limit=1, offset=0, cgccpf = cgccpf)
        except Exception as e:
            Log.error( str(e))
            result = {'message' : 'internal error',
                    'message_code' : 13,
                    'more' : 'something is broken'
                    }
            return self.render(result, status_code = 503)
        if n_records == 0 or len(results) == 0:
            result = {'message' : 'No donator was found with your criteria',
                     'message_code' : 11}
            return self.render(result, status_code = 404)
        headers = {}
        data = listify_queryset(results)
        incentivador = data[0]
        # Normalize the stored CGC/CPF (strip blanks), build the HAL
        # links, then apply the display mask before rendering.
        incentivador["cgccpf"] = remove_blanks(str(incentivador["cgccpf"]))
        self.build_links(args = {'incentivador_id' : incentivador_id})
        incentivador["cgccpf"] = cgccpf_mask(incentivador["cgccpf"])
        return self.render(incentivador, headers)
| Lafaiet/salicapi | salic-api/resources/incentivador/Incentivador_detail.py | Python | gpl-3.0 | 2,108 |
"""
To use this script, type:
$ python visualize_preds_actual.py [handle]
Of course, replace [handle] with the common prefix of the pickled data.
"""
import matplotlib.pyplot as plt
import pickle as pkl
import sys
import seaborn as sns
sns.set_context('poster')
from graphfp.utils import y_equals_x
if __name__ == '__main__':
    # Usage: python visualize_preds_actual.py <handle> <step_size> <animate>
    handle = sys.argv[1]
    step_size = int(sys.argv[2])
    # SECURITY NOTE: eval() on a command-line argument executes arbitrary
    # code.  Expected values are just the literals "True"/"False"; consider
    # ast.literal_eval or an explicit string comparison.  Flagged rather
    # than replaced to keep the CLI behaviour identical.
    animate = eval(sys.argv[3])

    # Load the pickled prediction/actual pairs and loss curves written by
    # the training run identified by `handle`.
    with open('{0}_predsactual.pkl'.format(handle), 'rb') as f:
        preds_vs_actual = pkl.load(f)
    with open('{0}_trainloss.pkl'.format(handle), 'rb') as f:
        trainloss = pkl.load(f)
    with open('{0}_predsactual_cv.pkl'.format(handle), 'rb') as f:
        preds_vs_actual_cv = pkl.load(f)
    with open('{0}_trainloss_cv.pkl'.format(handle), 'rb') as f:
        trainloss_cv = pkl.load(f)

    # Left panel (ax1): predictions vs. actual; right panel (ax2): losses.
    fig = plt.figure(figsize=(10, 4))
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.yaxis.tick_right()

    if animate:
        plt.show(block=False)
        # Replay the training history, redrawing every `step_size` iters.
        for i, data in sorted(preds_vs_actual.items(), key=lambda x: x[0]):
            if i % step_size == 0:
                ax1.clear()
                ax1.scatter(data['preds'], data['actual'], color='blue')
                ax1.scatter(preds_vs_actual_cv[i]['preds'],
                            preds_vs_actual_cv[i]['actual'], color='red')
                ax1.set_title('Iteration {0}'.format(i))
                ax1.set_xlabel('predictions')
                ax1.set_ylabel('actual')
                # Reference y = x line: perfect predictions fall on it.
                ax1.plot(y_equals_x(data['actual']),
                         y_equals_x(data['actual']),
                         marker='o', ls='-', lw=2, color='red', alpha=0.2)

                ax2.clear()
                ax2.set_xlim(0, len(trainloss))
                ax2.plot(trainloss[:i], color='blue', label='train')
                ax2.plot(trainloss_cv[:i], color='red', label='test')
                ax2.set_title('Iteration {0}'.format(i))
                ax2.set_xlabel('iteration number')
                ax2.set_ylabel('training error')
                ax2.set_yscale('log')
                ax2.legend()
                plt.draw()
                # BUGFIX: was plt.pause(1/100), which is plt.pause(0) under
                # Python 2 integer division; use an explicit float.
                plt.pause(0.01)
        plt.savefig('{0}_preds_trainloss.pdf'.format(handle),
                    bbox_inches='tight')
        plt.show(block=True)
    else:
        # Static figure: the final iteration's scatter plus full loss curves.
        li = max(preds_vs_actual.keys())
        ax1.scatter(preds_vs_actual[li]['preds'], preds_vs_actual[li]['actual'], color='blue')
        ax1.scatter(preds_vs_actual_cv[li]['preds'],
                    preds_vs_actual_cv[li]['actual'], color='red')
        ax1.set_xlabel('predictions')
        ax1.set_ylabel('actual')
        ax1.set_title('convnet')

        ax2.set_xlim(0, len(trainloss))
        ax2.plot(trainloss[:li], color='blue', label='train')
        ax2.plot(trainloss_cv[:li], color='red', label='test')
        ax2.set_xlabel('iteration number')
        ax2.set_ylabel('training error')
        ax2.set_yscale('log')
        ax2.set_title('train error')
        ax2.legend()
        plt.subplots_adjust(bottom=0.2)
        plt.savefig('{0}_preds_trainloss.pdf'.format(handle),
                    bbox_inches='tight')
| ericmjl/protein-convolutional-nets | experiments/minibatch/visualize_preds_actual.py | Python | mit | 3,186 |
#!/bin/py
"""Usage: python get_files.py OPTIONS HDF5FILE
Given the input files (in general not sorted),
this script selects the input files such that the sorted time
series (t0, t1, ..., tn) contains the ref_file (if given), and
the time difference between files is dt_target, with a relative
dt_tolerance smaller than dt_target*tolerance. To set the
processing parameters refer to the Options description below.
Also:
* The files with "time" out of tolerance are discarded.
* The files selected for processing are those with smaller
time discrepancy with respect to the exact time series.
* If there are gaps in the final time series the
output file is not written, unless the option
--ignore_gaps is set.
* The results are stored in the "files.txt"
file. If the file exists the output is not written
unless the --clobber option is set (in which
case the existing output file is deleted).
Options:
-h --help This help message.
-r --ref_file= File to use for time reference.
The time of the first input file
is used by default.
--clobber Overwrite old output file.
--dt_target= Target time difference between files.
The time difference of the first
two files is used by default.
--ignore_gaps Process files even if there
are gaps in the time series of
files to process. The computation
doesn't proceed by default.
--tolerance= Maximum fraction of delta t
abs(target time-file time)/dt_target
for a file to be processed. The
tolerance value should be between
0 and 0.5. Default is 0.1.
--min_t= Minimum value of t to include
--max_t= Maximum value of t to include
"""
import getopt
import glob
import h5py
import math
import numpy as np
import os
import sys
def is_number(s):
    """Return True if *s* parses as a float, False otherwise.

    Non-string, non-numeric inputs (e.g. None) raise TypeError, exactly
    as float() itself would.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def get_files_to_process(files,
                         t_ref_file = "",
                         dt_target = 0.0,
                         tolerance = 0.1,
                         min_t = float("-inf"),
                         max_t = float("inf")):
    """Select the HDF5 files whose sample time 't' best fits an evenly
    spaced series of spacing ``dt_target`` anchored at ``t_ref_file``.

    Each file's time is read from its 't' dataset.  A file is kept when
    its fractional offset from the ideal grid is below ``tolerance``;
    when two files land in the same slot, the one closer to the ideal
    time wins.  Returns a dict with:
      'files' -- 2 x N array (row 0: times, row 1: absolute paths),
                 sorted by increasing time;
      'gaps'  -- True if the kept series has a gap larger than
                 dt_target * (1 + 2*tolerance).
    """
    # Set target delta t and tolerance
    dt_tgt = float(dt_target)
    tolerance = float(tolerance)
    # Pick the first file in input to set the time
    # reference if a reference file is not provided explicitly
    if t_ref_file == "":
        t_ref_file = files[0]
    # Grab reference time
    ff = h5py.File(t_ref_file,'r')
    t_ref=ff['t'].value[0]
    ff.close()
    #print "Reference time file: ", t_ref_file, t_ref
    # Reset dt_tgt if zero to the time difference
    # of the first two input files
    if dt_tgt == 0:
        ff = h5py.File(files[0],'r+')
        t0 =ff['t'].value[0]
        ff.close()
        ff = h5py.File(files[1],'r+')
        t1 =ff['t'].value[0]
        ff.close()
        dt_tgt = abs(t0-t1)
        print dt_tgt
    # Pre-select files to process
    tt = []
    files_to_process = []
    print "Selecting files to process"
    for fi in files:
        if not os.path.isfile(fi):
            print "File ", fi, " not found"
            continue
        f = h5py.File(fi,'r')
        # Grab time
        # NOTE(review): f is never closed; consider f.close() after
        # reading 't' to avoid accumulating open handles.
        time=f['t'].value[0]
        # Decide if it should process it or skip it
        # If time is within tolerance and a file
        # has been selected for that slot, keep the
        # one with less difference wrt the ideal
        # time for that slot
        skip_this_file = False
        is_time_accounted_for = False
        replace_for_this_file = False
        # Fractional distance of `time` from the nearest ideal grid
        # point, folded into [0, 0.5].
        time_mod = ((time-t_ref) % dt_tgt) / dt_tgt
        if time_mod > 0.5: time_mod = 1 - time_mod
        # Check if time is within cuttoffs:
        if time<min_t or time>max_t:
            skip_this_file = True
        # Check if time is within tolerance:
        elif (time_mod < tolerance):
            # This is a candidate file,
            # now check if this time has been accounted for
            for i in xrange(len(tt)):
                time_accounted_for = tt[i]
                if ((np.abs(time-time_accounted_for)/dt_tgt) < 0.5):
                    # This time slot has been accounted for, skip
                    # adding it to the list
                    is_time_accounted_for = True
                    skip_this_file = True
                    # ... but check if it should replace the existing one
                    # because of being closer to the corresponding
                    # target time
                    time_mod0 = ((time_accounted_for-t_ref) % dt_tgt) / dt_tgt
                    if time_mod0 > 0.5: time_mod0 = 1 - time_mod0
                    # print time_mod, time_mod0, time_mod < time_mod0
                    if time_mod < time_mod0:
                        # .. replace the existing one by the current candidate
                        del files_to_process[i]
                        del tt[i]
                        files_to_process.insert(i, os.path.realpath(fi).rstrip())
                        tt.insert(i, time)
                        replace_for_this_file = True
                    break
        else:
            # Skip this file because is not within tolerance
            skip_this_file = True
        # print "time = ", time
        # print "time_mod = ", time_mod
        # print "tolerance = ", tolerance
        # print "Is time mod within tolerance? ", (time_mod < tolerance)
        # print "skip_this_file = ", skip_this_file
        # print "is_time_accounted_for = ", is_time_accounted_for
        # print "replace_for_this_file = ", replace_for_this_file
        # Now ignore the file or add it to the list
        if skip_this_file:
            # Continue with the next file without adding this one
            # to the list
            continue
        else:
            # Add this file to the list of files to process
            files_to_process.append(os.path.realpath(fi).rstrip())
            tt.append(time)
    #print files_to_process
    # Check for gaps in the time series
    gaps = False
    dts = np.diff(sorted(tt))
    for dt in dts:
        if dt > dt_tgt * (1+2*tolerance):
            gaps = True
            #print 'Found gap: ', dt, dt_tgt
            break
    # Sort files in increasing simulation time order
    all_files = np.array(tt).reshape(len(tt),1)
    all_files = np.concatenate((all_files, np.array(files_to_process).reshape(len(files_to_process),1)), axis=1)
    all_files_sort = all_files[all_files[:,0].argsort()]
    all_files = all_files.transpose()
    all_files_sort = all_files_sort.transpose()
    #print all_files_sort
    # Return values
    retval = {}
    retval['files'] = all_files_sort
    retval['gaps'] = gaps
    return retval
def main(argv=None):
# Set defaults
clobber = False
dt_target = 0.0
ignore_gaps = False
outfile = "files.txt"
ref_file = ""
tolerance = 0.1
min_t = float("-inf")
max_t = float("inf")
# Permit interactive use
if argv is None:
argv = sys.argv
# Parse and check incoming command line arguments
try:
try:
opts, args = getopt.getopt(argv[1:], "hr:",
["help", "ref_file=", "dt_target=", "tolerance=",
"ignore_gaps","clobber",
"min_t=", "max_t="
])
except getopt.error, msg:
raise Usage(msg)
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
return 0
if o in ("-r", "--ref_file"):
ref_file = a
if o in ("--dt_target"):
dt_target=float(a)
if o in ("--tolerance"):
tolerance=float(a)
if o in ("--ignore_gaps"):
ignore_gaps=True
if o in ("--clobber"):
clobber = True
if o in ("--min_t"):
min_t = a
if o in ("--max_t"):
max_t = a
# Check for proper number of arguments
if len(args) < 1:
print args, len(args)
print >> sys.stderr, "Incorrect number of arguments. See --help."
return 2
# Check for reference time file
if ref_file != "" and not os.path.isfile(ref_file):
print >> sys.stderr, "Reference file not found"
return 2
# Check for output file
if os.path.isfile(outfile):
if clobber:
os.system("rm " + outfile)
else:
print >> sys.stderr, ("Output file already exists.\n"
"Use option --clobber to overwrite.")
return 2
# Check for tolerance range
if tolerance < 0.0 or tolerance > 0.5:
print >> sys.stderr, "Option 'tolerance' out of range. See --help."
return 2
# Check time cuttofs
if not is_number(min_t):
print >> sys.stderr, "min_t is not a number. See --help."
return 2
else:
min_t = float(min_t)
if not is_number(max_t):
print >> sys.stderr, "max_t is not a number. See --help."
return 2
else:
max_t = float(max_t)
if min_t > max_t:
print >> sys.stderr, "min_t is larger than max_t. See --help."
return 2
except Usage, err:
print >> sys.stderr, err.msg
return 2
# Input files
hdf5files = args
# Get sorted list of files
files_dict = get_files_to_process(hdf5files, ref_file, dt_target, tolerance,
min_t, max_t)
files = files_dict['files']
gaps = files_dict['gaps']
# Print sorted files and dt
print files[1,0], files[0,0]
for i in xrange(1,len(files[0,:])):
print files[1,i], files[0,i], float(files[0,i]) - float(files[0,i-1])
# Output to text file
if not gaps:
print "There are no gaps in the time series"
np.savetxt(outfile, files[1,:], fmt='%s')
else: # there are gaps in the time series
if ignore_gaps:
print ("WARNING: There are gaps in the time series."
" Generating ", outfile, " due to --ignore_gaps.")
np.savetxt(outfile, files[1,:], fmt='%s')
else: # ignore gaps and compute anyway
print >>sys.stderr, "There are gaps in the time series."
return 2
sys.exit("Done")
if __name__ == "__main__":
sys.exit(main())
| RhysU/suzerain | postproc/select_files.py | Python | gpl-3.0 | 10,811 |
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# (c) 2016, Kevin Carter <kevin.carter@rackspace.com>
import copy
import importlib.util
import os
def load_module(name, path):
    """Import the Python source file at *path* under the name *name* and
    return the resulting module object."""
    spec = importlib.util.spec_from_file_location(name, path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
# NOTICE(cloudnull): The connection plugin imported using the full path to the
#                    file because the linear strategy plugin is not importable.
import ansible.plugins.strategy as strategy
# LINEAR is the stock "linear" strategy module, loaded directly from the
# file that lives next to ansible.plugins.strategy's __init__.
LINEAR = load_module(
    'ssh',
    os.path.join(os.path.dirname(strategy.__file__), 'linear.py')
)
# NOTICE(jmccrory): MAGIC_VARIABLE_MAPPING is imported so that additional
#                   container specific variables can be made available to
#                   the connection plugin.
#                   In Ansible 2.5 the magic variable mapping has been moved,
#                   but updating it directly is no longer necessary. The
#                   variables can be made available through being defined in
#                   the connection plugin's docstring and this can eventually
#                   be removed.
try:
    from ansible.playbook.play_context import MAGIC_VARIABLE_MAPPING
    MAGIC_VARIABLE_MAPPING.update({
        'physical_host': ('physical_host',),
        'container_name': ('inventory_hostname',),
        'container_tech': ('container_tech',),
        'container_user': ('container_user',),
    })
except ImportError:
    # Ansible >= 2.5 no longer exposes MAGIC_VARIABLE_MAPPING here; the
    # connection plugin's docstring supplies the variables instead.
    pass
class StrategyModule(LINEAR.StrategyModule):
    """Notes about this strategy.
    When this strategy encounters a task with a "when" or "register" stanza it
    will collect results immediately essentially forming a block. If the task
    does not have a "when" or "register" stanza the results will be collected
    after all tasks have been queued.
    To improve execution speed if a task has a "when" conditional attached to
    it the conditional will be rendered before queuing the task and should the
    conditional evaluate to True the task will be queued. To ensure the correct
    execution of playbooks this optimisation will only be used if there are no
    lookups used with the task which is to guarantee proper task execution.
    Container context will be added to the ``playbook_context`` which is used
    to further optimise connectivity by only ever SSH'ing into a given host
    machine instead of attempting an SSH connection into a container.
    """
    @staticmethod
    def _check_when(host, task, templar, task_vars):
        """Evaluate if conditionals are to be run.
        This will error on the side of caution:
            * If a conditional is detected to be valid the method will return
              True.
            * If there's ever an issue with the templated conditional the
              method will also return True.
            * If the task has a detected "with" the method will return True.
        :param host: object
        :param task: object
        :param task_vars: dict
        :param templar: object
        """
        try:
            # No "when" at all, or "when" combined with "register": do not
            # pre-evaluate; let Ansible handle it at execution time.
            if not task.when or (task.when and task.register):
                return True
            # Any "with_*" lookup means the conditional may depend on the
            # loop item, so it cannot safely be evaluated here.
            _ds = getattr(task, '_ds', dict())
            if any([i for i in _ds.keys() if i.startswith('with')]):
                return True
            conditional = task.evaluate_conditional(templar, task_vars)
            if not conditional:
                LINEAR.display.verbose(
                    u'Task "%s" has been omitted from the job because the'
                    u' conditional "%s" was evaluated as "%s"'
                    % (task.name, task.when, conditional),
                    host=host,
                    caplevel=0
                )
                return False
        except Exception:
            # Deliberately broad: if the early evaluation fails for any
            # reason, fall back to queueing the task.
            return True
        else:
            return True
    def _queue_task(self, host, task, task_vars, play_context):
        """Queue a task to be sent to the worker.
        Set a host variable, 'physical_host_addrs', containing a dictionary of
        each physical host and its 'ansible_host' variable.
        """
        templar = LINEAR.Templar(loader=self._loader, variables=task_vars)
        # Skip queueing entirely when the pre-evaluated conditional is False.
        if not self._check_when(host, task, templar, task_vars):
            return
        # pha maps physical host name -> reachable address; the connection
        # plugin uses it to SSH to the host instead of the container.
        pha = task_vars['physical_host_addrs'] = dict()
        physical_host_items = [task_vars.get('physical_host')]
        if task.delegate_to:
            # For delegated tasks, we also need the information from the delegated hosts
            for delegated_host in task_vars.get('ansible_delegated_vars', dict()).keys():
                LINEAR.display.verbose(
                    u'Task is delegated to %s.' % delegated_host,
                    host=host,
                    caplevel=0
                )
                delegated_host_info = self._inventory.get_host(u'%s' % delegated_host)
                # This checks if we are delegating to a host which does not exist
                # in the inventory (possibly using its IP address)
                if delegated_host_info is None:
                    task_vars['container_name'] = None
                    continue
                physical_host_vars = delegated_host_info.get_vars()
                physical_host_templar = LINEAR.Templar(loader=self._loader,
                                                       variables=physical_host_vars)
                delegated_physical_host = physical_host_templar.template(
                    physical_host_vars.get('physical_host'))
                if delegated_physical_host:
                    physical_host_items.append(delegated_physical_host)
                    LINEAR.display.verbose(
                        u'Task is delegated to %s. Adding its physical host %s'
                        % (delegated_host, delegated_physical_host),
                        host=host,
                        caplevel=0
                    )
        for physical_host_item in physical_host_items:
            ph = self._inventory.get_host(physical_host_item)
            if ph:
                LINEAR.display.verbose(
                    u'The "physical_host" variable of "%s" has been found to'
                    u' have a corresponding host entry in inventory.'
                    % physical_host_item,
                    host=host,
                    caplevel=0
                )
                physical_host_vars = ph.get_vars()
                # First variable present wins, in this priority order.
                for item in ['ansible_host', 'container_address', 'address']:
                    addr = physical_host_vars.get(item)
                    if addr:
                        LINEAR.display.verbose(
                            u'The "physical_host" variable of "%s" terminates'
                            u' at "%s" using the host variable "%s".' % (
                                physical_host_item,
                                addr,
                                item
                            ),
                            host=host,
                            caplevel=0
                        )
                        pha[ph.name] = addr
                        break
        return super(StrategyModule, self)._queue_task(
            host,
            task,
            task_vars,
            play_context
        )
| openstack/openstack-ansible-plugins | plugins/strategy/linear.py | Python | apache-2.0 | 7,874 |
from __future__ import absolute_import
from __future__ import print_function
import filecmp
import os
import ujson
from django.core import mail
from django.http import HttpResponse
from django.test import override_settings
from mock import patch
from typing import Any, Dict, List
from zerver.lib.actions import do_change_stream_invite_only
from zerver.models import get_realm, get_stream, get_user_profile_by_email, \
Realm, Stream, UserProfile
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
avatar_disk_path, get_test_image_file, tornado_redirected_to_list,
)
class BotTest(ZulipTestCase):
def assert_num_bots_equal(self, count):
# type: (int) -> None
result = self.client_get("/json/bots")
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertEqual(count, len(json['bots']))
def create_bot(self, **extras):
# type: (**Any) -> Dict[str, Any]
bot_info = {
'full_name': 'The Bot of Hamlet',
'short_name': 'hambot',
}
bot_info.update(extras)
result = self.client_post("/json/bots", bot_info)
self.assert_json_success(result)
return ujson.loads(result.content)
    def test_bot_domain(self):
        # type: () -> None
        """Created bots get an email on the realm's bot domain, and the bot
        domain falls back to the server host when subdomains don't apply."""
        self.login("hamlet@zulip.com")
        self.create_bot()
        self.assertTrue(UserProfile.objects.filter(email='hambot-bot@zulip.testserver').exists())
        # The other cases are hard to test directly, since we don't allow creating bots from
        # the wrong subdomain, and because 'testserver.example.com' is not a valid domain for the bot's email.
        # So we just test the Realm.get_bot_domain function.
        realm = get_realm('zulip')
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            self.assertEqual(realm.get_bot_domain(), 'zulip.testserver')
            # With every other realm deactivated, the subdomain is dropped.
            Realm.objects.exclude(string_id='zulip').update(deactivated=True)
            self.assertEqual(realm.get_bot_domain(), 'testserver')
def deactivate_bot(self):
# type: () -> None
result = self.client_delete("/json/bots/hambot-bot@zulip.testserver")
self.assert_json_success(result)
def test_add_bot_with_bad_username(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
bot_info = dict(
full_name='',
short_name='',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Bad name or username')
self.assert_num_bots_equal(0)
    def test_add_bot(self):
        # type: () -> None
        """Creating a bot sends a 'realm_bot' add event with the full bot
        payload, and the bot then shows up in /json/users with its owner."""
        self.login("hamlet@zulip.com")
        self.assert_num_bots_equal(0)
        events = [] # type: List[Dict[str, Any]]
        # Capture the events that would be pushed through Tornado.
        with tornado_redirected_to_list(events):
            result = self.create_bot()
        self.assert_num_bots_equal(1)
        bot = get_user_profile_by_email('hambot-bot@zulip.testserver')
        # Exactly one realm_bot event is expected; compare its whole payload.
        event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
        self.assertEqual(
            dict(
                type='realm_bot',
                op='add',
                bot=dict(email='hambot-bot@zulip.testserver',
                         user_id=bot.id,
                         full_name='The Bot of Hamlet',
                         is_active=True,
                         api_key=result['api_key'],
                         avatar_url=result['avatar_url'],
                         default_sending_stream=None,
                         default_events_register_stream=None,
                         default_all_public_streams=False,
                         owner='hamlet@zulip.com')
            ),
            event['event']
        )
        # The bot should also be listed by the users endpoint, with its
        # owner and user_id filled in.
        users_result = self.client_get('/json/users')
        members = ujson.loads(users_result.content)['members']
        bots = [m for m in members if m['email'] == 'hambot-bot@zulip.testserver']
        self.assertEqual(len(bots), 1)
        bot = bots[0]
        self.assertEqual(bot['bot_owner'], 'hamlet@zulip.com')
        self.assertEqual(bot['user_id'], get_user_profile_by_email('hambot-bot@zulip.testserver').id)
def test_add_bot_with_username_in_use(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot()
self.assert_num_bots_equal(1)
bot_info = dict(
full_name='Duplicate',
short_name='hambot',
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'Username already in use')
    def test_add_bot_with_user_avatar(self):
        # type: () -> None
        """Creating a bot with an uploaded image stores that image as the
        bot's avatar (avatar_source switches to AVATAR_FROM_USER)."""
        self.login("hamlet@zulip.com")
        self.assert_num_bots_equal(0)
        with get_test_image_file('img.png') as fp:
            self.create_bot(file=fp)
            profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
            # The uploaded image must be byte-identical to the ".original"
            # copy saved on the server's avatar storage.
            self.assertTrue(filecmp.cmp(fp.name,
                                        os.path.splitext(avatar_disk_path(profile))[0] +
                                        ".original"))
        self.assert_num_bots_equal(1)
        self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
        self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_add_bot_with_too_many_files(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
with get_test_image_file('img.png') as fp1, \
get_test_image_file('img.gif') as fp2:
bot_info = dict(
full_name='whatever',
short_name='whatever',
file1=fp1,
file2=fp2,
)
result = self.client_post("/json/bots", bot_info)
self.assert_json_error(result, 'You may only upload one file at a time')
self.assert_num_bots_equal(0)
def test_add_bot_with_default_sending_stream(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Denmark')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Denmark')
profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
self.assertEqual(profile.default_sending_stream.name, 'Denmark')
def test_add_bot_with_default_sending_stream_not_subscribed(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.assert_num_bots_equal(0)
result = self.create_bot(default_sending_stream='Rome')
self.assert_num_bots_equal(1)
self.assertEqual(result['default_sending_stream'], 'Rome')
profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
self.assertEqual(profile.default_sending_stream.name, 'Rome')
def test_bot_add_subscription(self):
    # type: () -> None
    """
    Calling POST /json/users/me/subscriptions should successfully add
    streams, and a stream to the
    list of subscriptions and confirm the right number of events
    are generated.
    When 'principals' has a bot, no notification message event or invitation email
    is sent when add_subscriptions_backend is called in the above api call.
    """
    self.login("hamlet@zulip.com")

    # Normal user i.e. not a bot.
    request_data = {
        'principals': '["iago@zulip.com"]'
    }
    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], request_data)
        self.assert_json_success(result)

    msg_event = [e for e in events if e['event']['type'] == 'message']
    self.assert_length(msg_event, 1)  # Notification message event is sent.

    # Create a bot.
    self.assert_num_bots_equal(0)
    result = self.create_bot()
    self.assert_num_bots_equal(1)

    # A bot
    bot_request_data = {
        'principals': '["hambot-bot@zulip.testserver"]'
    }
    events_bot = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events_bot):
        result = self.common_subscribe_to_streams("hamlet@zulip.com", ['Rome'], bot_request_data)
        self.assert_json_success(result)

    # No notification message event or invitation email is sent because of bot.
    msg_event = [e for e in events_bot if e['event']['type'] == 'message']
    self.assert_length(msg_event, 0)
    # Exactly one event fewer (the skipped notification message) than the
    # equivalent subscription of a human above.
    self.assertEqual(len(events_bot), len(events) - 1)

    # Test runner automatically redirects all sent email to a dummy 'outbox'.
    self.assertEqual(len(mail.outbox), 0)
def test_add_bot_with_default_sending_stream_private_allowed(self):
    # type: () -> None
    """A subscribed owner may create a bot whose default sending stream is
    private; the realm_bot 'add' event must carry that stream."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = get_stream("Denmark", user_profile.realm)
    self.subscribe_to_stream(user_profile.email, stream.name)
    do_change_stream_invite_only(stream, True)

    self.assert_num_bots_equal(0)
    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        result = self.create_bot(default_sending_stream='Denmark')
    self.assert_num_bots_equal(1)
    self.assertEqual(result['default_sending_stream'], 'Denmark')

    profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
    self.assertEqual(profile.default_sending_stream.name, 'Denmark')

    # The realm_bot event must fully describe the new bot, including the
    # private default sending stream.
    event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
    self.assertEqual(
        dict(
            type='realm_bot',
            op='add',
            bot=dict(email='hambot-bot@zulip.testserver',
                     user_id=profile.id,
                     full_name='The Bot of Hamlet',
                     is_active=True,
                     api_key=result['api_key'],
                     avatar_url=result['avatar_url'],
                     default_sending_stream='Denmark',
                     default_events_register_stream=None,
                     default_all_public_streams=False,
                     owner='hamlet@zulip.com')
        ),
        event['event']
    )
    # Only the bot's owner is notified (the stream is private).
    self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_sending_stream_private_denied(self):
    # type: () -> None
    """Creating a bot fails when its default sending stream is a private
    stream the owner cannot access."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = get_stream("Denmark", user_profile.realm)
    self.unsubscribe_from_stream("hamlet@zulip.com", "Denmark")
    do_change_stream_invite_only(stream, True)

    payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
        default_sending_stream='Denmark',
    )
    response = self.client_post("/json/bots", payload)
    self.assert_json_error(response, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_events_register_stream(self):
    # type: () -> None
    """A bot can be created with a default events register stream."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    created = self.create_bot(default_events_register_stream='Denmark')
    self.assert_num_bots_equal(1)
    self.assertEqual(created['default_events_register_stream'], 'Denmark')
    bot_profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
    self.assertEqual(bot_profile.default_events_register_stream.name, 'Denmark')
def test_add_bot_with_default_events_register_stream_private_allowed(self):
    # type: () -> None
    """A subscribed owner may create a bot whose default events register
    stream is private; the realm_bot 'add' event must carry that stream."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = self.subscribe_to_stream(user_profile.email, 'Denmark')
    do_change_stream_invite_only(stream, True)

    self.assert_num_bots_equal(0)
    events = []  # type: List[Dict[str, Any]]
    with tornado_redirected_to_list(events):
        result = self.create_bot(default_events_register_stream='Denmark')
    self.assert_num_bots_equal(1)
    self.assertEqual(result['default_events_register_stream'], 'Denmark')

    bot_profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
    self.assertEqual(bot_profile.default_events_register_stream.name, 'Denmark')

    # The realm_bot event must fully describe the new bot, including the
    # private default events register stream.
    event = [e for e in events if e['event']['type'] == 'realm_bot'][0]
    self.assertEqual(
        dict(
            type='realm_bot',
            op='add',
            bot=dict(email='hambot-bot@zulip.testserver',
                     full_name='The Bot of Hamlet',
                     user_id=bot_profile.id,
                     is_active=True,
                     api_key=result['api_key'],
                     avatar_url=result['avatar_url'],
                     default_sending_stream=None,
                     default_events_register_stream='Denmark',
                     default_all_public_streams=False,
                     owner='hamlet@zulip.com')
        ),
        event['event']
    )
    # Only the bot's owner is notified (the stream is private).
    self.assertEqual(event['users'], (user_profile.id,))
def test_add_bot_with_default_events_register_stream_private_denied(self):
    # type: () -> None
    """Creating a bot fails when its events register stream is a private
    stream the owner cannot access."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = get_stream("Denmark", user_profile.realm)
    self.unsubscribe_from_stream("hamlet@zulip.com", "Denmark")
    do_change_stream_invite_only(stream, True)

    self.assert_num_bots_equal(0)
    payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
        default_events_register_stream='Denmark',
    )
    response = self.client_post("/json/bots", payload)
    self.assert_json_error(response, "Invalid stream name 'Denmark'")
def test_add_bot_with_default_all_public_streams(self):
    # type: () -> None
    """A bot can be created with default_all_public_streams enabled."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    created = self.create_bot(default_all_public_streams=ujson.dumps(True))
    self.assert_num_bots_equal(1)
    self.assertTrue(created['default_all_public_streams'])
    bot_profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
    self.assertEqual(bot_profile.default_all_public_streams, True)
def test_deactivate_bot(self):
    # type: () -> None
    """Deactivating a bot removes it; deactivating it twice is harmless."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    self.create_bot()
    self.assert_num_bots_equal(1)
    # Deactivation is idempotent: doing it a second time still succeeds.
    for _ in range(2):
        self.deactivate_bot()
    self.assert_num_bots_equal(0)
def test_deactivate_bogus_bot(self):
    # type: () -> None
    """Deleting a nonexistent bot fails with 'No such bot' and deletes nothing."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    self.create_bot()
    self.assert_num_bots_equal(1)
    result = self.client_delete("/json/bots/bogus-bot@zulip.com")
    self.assert_json_error(result, 'No such bot')
    # The real bot is untouched.
    self.assert_num_bots_equal(1)
def test_bot_deactivation_attacks(self):
    # type: () -> None
    """You cannot deactivate somebody else's bot."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    self.create_bot()
    self.assert_num_bots_equal(1)

    # Have Othello try to deactivate both Hamlet and
    # Hamlet's bot.
    self.login("othello@zulip.com")

    # Can not deactivate a user as a bot
    result = self.client_delete("/json/bots/hamlet@zulip.com")
    self.assert_json_error(result, 'No such bot')

    # A non-owner attempting to delete someone else's bot is rejected.
    result = self.client_delete("/json/bots/hambot-bot@zulip.testserver")
    self.assert_json_error(result, 'Insufficient permission')

    # But we don't actually deactivate the other person's bot.
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(1)

    # Can not deactivate a bot as a user
    result = self.client_delete("/json/users/hambot-bot@zulip.testserver")
    self.assert_json_error(result, 'No such user')
    self.assert_num_bots_equal(1)
def test_bot_permissions(self):
    # type: () -> None
    """Another user may neither regenerate a bot's API key nor edit it."""
    self.login("hamlet@zulip.com")
    self.assert_num_bots_equal(0)
    self.create_bot()
    self.assert_num_bots_equal(1)

    # Othello does not own Hamlet's bot, so both attempts must fail.
    self.login("othello@zulip.com")
    response = self.client_post("/json/bots/hambot-bot@zulip.testserver/api_key/regenerate")
    self.assert_json_error(response, 'Insufficient permission')
    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 {'full_name': 'Fred'})
    self.assert_json_error(response, 'Insufficient permission')
def get_bot(self):
    # type: () -> Dict[str, Any]
    """Return the logged-in user's first bot, as reported by the API."""
    response = self.client_get("/json/bots")
    return ujson.loads(response.content)['bots'][0]
def test_update_api_key(self):
    # type: () -> None
    """Regenerating a bot's API key replaces the old key."""
    self.login("hamlet@zulip.com")
    self.create_bot()
    old_api_key = self.get_bot()['api_key']
    response = self.client_post('/json/bots/hambot-bot@zulip.testserver/api_key/regenerate')
    self.assert_json_success(response)
    new_api_key = ujson.loads(response.content)['api_key']
    self.assertNotEqual(old_api_key, new_api_key)
    # The regenerated key is what the bot listing now reports.
    self.assertEqual(new_api_key, self.get_bot()['api_key'])
def test_update_api_key_for_invalid_user(self):
    # type: () -> None
    """Regenerating an API key for a nonexistent bot fails."""
    self.login("hamlet@zulip.com")
    response = self.client_post('/json/bots/nonexistentuser@zulip.com/api_key/regenerate')
    self.assert_json_error(response, 'No such user')
def test_patch_bot_full_name(self):
    # type: () -> None
    """PATCHing full_name renames the bot."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(full_name='Fred'))
    self.assert_json_success(response)
    self.assertEqual('Fred', ujson.loads(response.content)['full_name'])
    self.assertEqual('Fred', self.get_bot()['full_name'])
def test_patch_bot_owner(self):
    # type: () -> None
    """PATCHing bot_owner hands the bot over to another user."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(bot_owner='othello@zulip.com'))
    self.assert_json_success(response)
    # The response reflects the new owner ...
    self.assertEqual(ujson.loads(response.content)['bot_owner'], 'othello@zulip.com')
    # ... and the bot now appears in Othello's bot list.
    self.login('othello@zulip.com')
    self.assertEqual('The Bot of Hamlet', self.get_bot()['full_name'])
@override_settings(LOCAL_UPLOADS_DIR='var/bot_avatar')
def test_patch_bot_avatar(self):
    # type: () -> None
    """Patching a bot's avatar: a multi-file upload is rejected and leaves the
    avatar untouched; a single-file upload replaces it and bumps avatar_version."""
    self.login("hamlet@zulip.com")
    bot_info = {
        'full_name': 'The Bot of Hamlet',
        'short_name': 'hambot',
    }
    result = self.client_post("/json/bots", bot_info)
    self.assert_json_success(result)

    # Freshly created bots default to a gravatar-based avatar.
    profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
    self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_GRAVATAR)

    # Try error case first (too many files):
    with get_test_image_file('img.png') as fp1, \
            get_test_image_file('img.gif') as fp2:
        result = self.client_patch_multipart(
            '/json/bots/hambot-bot@zulip.testserver',
            dict(file1=fp1, file2=fp2))
        self.assert_json_error(result, 'You may only upload one file at a time')

    # The failed upload must not have bumped the avatar version.
    profile = get_user_profile_by_email("hambot-bot@zulip.testserver")
    self.assertEqual(profile.avatar_version, 1)

    # HAPPY PATH
    with get_test_image_file('img.png') as fp:
        result = self.client_patch_multipart(
            '/json/bots/hambot-bot@zulip.testserver',
            dict(file=fp))
        profile = get_user_profile_by_email('hambot-bot@zulip.testserver')
        self.assertEqual(profile.avatar_version, 2)
        # Make sure that avatar image that we've uploaded is same with avatar image in the server
        self.assertTrue(filecmp.cmp(fp.name,
                                    os.path.splitext(avatar_disk_path(profile))[0] +
                                    ".original"))
    self.assert_json_success(result)

    self.assertEqual(profile.avatar_source, UserProfile.AVATAR_FROM_USER)
    self.assertTrue(os.path.exists(avatar_disk_path(profile)))
def test_patch_bot_to_stream(self):
    # type: () -> None
    """PATCHing default_sending_stream is reflected in response and bot record."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream='Denmark'))
    self.assert_json_success(response)
    self.assertEqual('Denmark', ujson.loads(response.content)['default_sending_stream'])
    self.assertEqual('Denmark', self.get_bot()['default_sending_stream'])
def test_patch_bot_to_stream_not_subscribed(self):
    # type: () -> None
    """PATCHing to a public stream works even if the owner is not subscribed."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream='Rome'))
    self.assert_json_success(response)
    self.assertEqual('Rome', ujson.loads(response.content)['default_sending_stream'])
    self.assertEqual('Rome', self.get_bot()['default_sending_stream'])
def test_patch_bot_to_stream_none(self):
    # type: () -> None
    """PATCHing default_sending_stream to the empty string clears it."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream=''))
    self.assert_json_success(response)
    bot_profile = get_user_profile_by_email("hambot-bot@zulip.testserver")
    self.assertEqual(None, bot_profile.default_sending_stream)
    self.assertEqual(None, self.get_bot()['default_sending_stream'])
def test_patch_bot_to_stream_private_allowed(self):
    # type: () -> None
    """A subscribed owner may point a bot at a private sending stream."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = self.subscribe_to_stream(user_profile.email, "Denmark")
    do_change_stream_invite_only(stream, True)

    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream='Denmark'))
    self.assert_json_success(response)
    self.assertEqual('Denmark', ujson.loads(response.content)['default_sending_stream'])
    self.assertEqual('Denmark', self.get_bot()['default_sending_stream'])
def test_patch_bot_to_stream_private_denied(self):
    # type: () -> None
    """PATCHing to a private stream the owner cannot access is rejected."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = get_stream("Denmark", user_profile.realm)
    self.unsubscribe_from_stream("hamlet@zulip.com", "Denmark")
    do_change_stream_invite_only(stream, True)

    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream='Denmark'))
    self.assert_json_error(response, "Invalid stream name 'Denmark'")
def test_patch_bot_to_stream_not_found(self):
    # type: () -> None
    """PATCHing to a stream that does not exist is rejected."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_sending_stream='missing'))
    self.assert_json_error(response, "Invalid stream name 'missing'")
def test_patch_bot_events_register_stream(self):
    # type: () -> None
    """PATCHing default_events_register_stream updates response and bot record."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_events_register_stream='Denmark'))
    self.assert_json_success(response)
    self.assertEqual('Denmark', ujson.loads(response.content)['default_events_register_stream'])
    self.assertEqual('Denmark', self.get_bot()['default_events_register_stream'])
def test_patch_bot_events_register_stream_allowed(self):
    # type: () -> None
    """A subscribed owner may point a bot at a private events register stream."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = self.subscribe_to_stream(user_profile.email, "Denmark")
    do_change_stream_invite_only(stream, True)

    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_events_register_stream='Denmark'))
    self.assert_json_success(response)
    self.assertEqual('Denmark', ujson.loads(response.content)['default_events_register_stream'])
    self.assertEqual('Denmark', self.get_bot()['default_events_register_stream'])
def test_patch_bot_events_register_stream_denied(self):
    # type: () -> None
    """PATCHing the events register stream to an inaccessible private stream fails."""
    self.login("hamlet@zulip.com")
    user_profile = get_user_profile_by_email("hamlet@zulip.com")
    stream = get_stream("Denmark", user_profile.realm)
    self.unsubscribe_from_stream("hamlet@zulip.com", "Denmark")
    do_change_stream_invite_only(stream, True)

    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_events_register_stream='Denmark'))
    self.assert_json_error(response, "Invalid stream name 'Denmark'")
def test_patch_bot_events_register_stream_none(self):
    # type: () -> None
    """PATCHing default_events_register_stream to the empty string clears it."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_events_register_stream=''))
    self.assert_json_success(response)
    bot_profile = get_user_profile_by_email("hambot-bot@zulip.testserver")
    self.assertEqual(None, bot_profile.default_events_register_stream)
    self.assertEqual(None, self.get_bot()['default_events_register_stream'])
def test_patch_bot_events_register_stream_not_found(self):
    # type: () -> None
    """PATCHing the events register stream to a nonexistent stream is rejected."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_events_register_stream='missing'))
    self.assert_json_error(response, "Invalid stream name 'missing'")
def test_patch_bot_default_all_public_streams_true(self):
    # type: () -> None
    """PATCHing default_all_public_streams to true is reflected everywhere."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_all_public_streams=ujson.dumps(True)))
    self.assert_json_success(response)
    self.assertEqual(ujson.loads(response.content)['default_all_public_streams'], True)
    self.assertEqual(self.get_bot()['default_all_public_streams'], True)
def test_patch_bot_default_all_public_streams_false(self):
    # type: () -> None
    """PATCHing default_all_public_streams to false is reflected everywhere."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    response = self.client_patch("/json/bots/hambot-bot@zulip.testserver",
                                 dict(default_all_public_streams=ujson.dumps(False)))
    self.assert_json_success(response)
    self.assertEqual(ujson.loads(response.content)['default_all_public_streams'], False)
    self.assertEqual(self.get_bot()['default_all_public_streams'], False)
def test_patch_bot_via_post(self):
    # type: () -> None
    """A POST carrying method=PATCH is treated as a PATCH (legacy clients)."""
    self.login("hamlet@zulip.com")
    create_payload = dict(
        full_name='The Bot of Hamlet',
        short_name='hambot',
    )
    self.assert_json_success(self.client_post("/json/bots", create_payload))

    patch_payload = dict(
        full_name='Fred',
        method='PATCH',
    )
    response = self.client_post("/json/bots/hambot-bot@zulip.testserver", patch_payload)
    self.assert_json_success(response)
    self.assertEqual('Fred', ujson.loads(response.content)['full_name'])
    self.assertEqual('Fred', self.get_bot()['full_name'])
def test_patch_bogus_bot(self):
    # type: () -> None
    """Patching a nonexistent bot fails with 'No such user' and changes nothing."""
    self.login("hamlet@zulip.com")
    self.create_bot()
    bot_info = {
        'full_name': 'Fred',
    }
    result = self.client_patch("/json/bots/nonexistent-bot@zulip.com", bot_info)
    self.assert_json_error(result, 'No such user')
    # The real bot is untouched.
    self.assert_num_bots_equal(1)
| dawran6/zulip | zerver/tests/test_bots.py | Python | apache-2.0 | 33,314 |
#!/usr/bin/env python
"""
Usage: make_lite.py <wrapped_routines_file> <lapack_dir> <output_dir>
Typical invocation:
make_lite.py wrapped_routines /tmp/lapack-3.x.x .
Requires the following to be on the path:
* f2c
* patch
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import subprocess
import fortran
import clapack_scrub
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
# -C to check array subscripts
F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
HEADER = '''\
/*
NOTE: This is generated code. Look in Misc/lapack_lite for information on
remaking this file.
*/
#include "f2c.h"
#ifdef HAVE_CONFIG
#include "config.h"
#else
extern doublereal dlamch_(char *);
#define EPSILON dlamch_("Epsilon")
#define SAFEMINIMUM dlamch_("Safe minimum")
#define PRECISION dlamch_("Precision")
#define BASE dlamch_("Base")
#endif
extern doublereal dlapy2_(doublereal *x, doublereal *y);
/*
f2c knows the exact rules for precedence, and so omits parentheses where not
strictly necessary. Since this is generated code, we don't really care if
it's readable, and we know what is written is correct. So don't warn about
them.
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wparentheses"
#endif
'''
class FortranRoutine(object):
    """A Fortran routine backed by a single ``.f`` source file."""
    type = 'generic'

    def __init__(self, name=None, filename=None):
        self.filename = filename
        # Default the routine name to the filename with its extension dropped.
        if name is None:
            name, _ = os.path.splitext(filename)
        self.name = name
        self._dependencies = None  # lazily computed and cached

    def dependencies(self):
        """Return the lower-cased names of the routines this one calls."""
        if self._dependencies is None:
            self._dependencies = [
                dep.lower() for dep in fortran.getDependencies(self.filename)
            ]
        return self._dependencies

    def __repr__(self):
        return "FortranRoutine({!r}, filename={!r})".format(self.name, self.filename)
class UnknownFortranRoutine(FortranRoutine):
    """Placeholder for a routine whose source file could not be located."""
    type = 'unknown'

    def __init__(self, name):
        FortranRoutine.__init__(self, name=name, filename='<unknown>')

    def dependencies(self):
        # Nothing is known about this routine, so report no dependencies.
        return []
class FortranLibrary(object):
    """Container for a bunch of Fortran routines, indexed by lower-cased name."""

    def __init__(self, src_dirs):
        self._src_dirs = src_dirs
        self.names_to_routines = {}

    def _findRoutine(self, rname):
        rname = rname.lower()
        # Look for "<name>.f" in each source directory, in order.
        for src_dir in self._src_dirs:
            candidate = os.path.join(src_dir, rname + '.f')
            if os.path.exists(candidate):
                return self._newFortranRoutine(rname, candidate)
        return UnknownFortranRoutine(rname)

    def _newFortranRoutine(self, rname, filename):
        return FortranRoutine(rname, filename)

    def addIgnorableRoutine(self, rname):
        """Add a routine that we don't want to consider when looking at
        dependencies.
        """
        rname = rname.lower()
        self.names_to_routines[rname] = UnknownFortranRoutine(rname)

    def addRoutine(self, rname):
        """Add a routine to the library."""
        self.getRoutine(rname)

    def getRoutine(self, rname):
        """Get a routine from the library. Will add if it's not found."""
        rname = rname.lower()
        try:
            return self.names_to_routines[rname]
        except KeyError:
            routine = self._findRoutine(rname)
            self.names_to_routines[rname] = routine
            return routine

    def allRoutineNames(self):
        """Return the names of all the routines."""
        return list(self.names_to_routines.keys())

    def allRoutines(self):
        """Return all the routines."""
        return list(self.names_to_routines.values())

    def resolveAllDependencies(self):
        """Try to add routines to the library to satisfy all the dependencies
        for each routine in the library.

        Returns a set of routine names that have the dependencies unresolved.
        """
        done = set()
        previous_todo = set()
        while True:
            todo = set(self.allRoutineNames()) - done
            if todo == previous_todo:
                # No progress since the last pass: whatever is left is unresolvable.
                break
            for name in todo:
                for dep in self.getRoutine(name).dependencies():
                    self.addRoutine(dep)
                done.add(name)
            previous_todo = todo
        return todo
class LapackLibrary(FortranLibrary):
    """FortranLibrary that tags each routine with a LAPACK-specific type."""

    def _newFortranRoutine(self, rname, filename):
        routine = FortranLibrary._newFortranRoutine(self, rname, filename)
        lowered = filename.lower()
        if 'blas' in lowered:
            routine.type = 'blas'
        elif 'install' in lowered:
            routine.type = 'config'
        else:
            # LAPACK precision prefixes: z/c/s/d map to per-precision types.
            prefixes = {'z': 'z_lapack', 'c': 'c_lapack',
                        's': 's_lapack', 'd': 'd_lapack'}
            routine.type = prefixes.get(rname[:1], 'lapack')
        return routine

    def allRoutinesByType(self, typename):
        """Return all routines of the given type, sorted by name."""
        matching = [r for r in self.allRoutines() if r.type == typename]
        return sorted(matching, key=lambda r: r.name)
def printRoutineNames(desc, routines):
    """Print *desc* followed by one tab-indented routine name per line."""
    print(desc)
    for r in routines:
        print('\t%s' % r.name)
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
    """Build a LapackLibrary for the wrapped routines and their dependencies.

    LAPACK tarballs have shipped with both upper- and lower-case directory
    names, so fall back to the lower-case spelling when needed.
    """
    def _existing_dir(upper_parts, lower_parts):
        path = os.path.join(lapack_dir, *upper_parts)
        if os.path.exists(path):
            return path
        return os.path.join(lapack_dir, *lower_parts)

    blas_src_dir = _existing_dir(('BLAS', 'SRC'), ('blas', 'src'))
    lapack_src_dir = _existing_dir(('SRC',), ('src',))
    install_src_dir = _existing_dir(('INSTALL',), ('install',))

    library = LapackLibrary([install_src_dir, blas_src_dir, lapack_src_dir])

    for name in ignores:
        library.addIgnorableRoutine(name)
    for name in wrapped_routines:
        library.addRoutine(name)

    library.resolveAllDependencies()
    return library
def getWrappedRoutineNames(wrapped_routines_file):
    """Parse the wrapped-routines file into (routines, ignores).

    Blank lines and '#' comments are skipped. Lines starting with 'IGNORE:'
    contribute their whitespace-separated names to *ignores*; every other
    line is taken as a routine name.
    """
    routines = []
    ignores = []
    with open(wrapped_routines_file) as fo:
        for raw_line in fo:
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue
            if line.startswith('IGNORE:'):
                ignores.extend(line[7:].split())
            else:
                routines.append(line)
    return routines, ignores
types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'}
def dumpRoutineNames(library, output_dir):
    """Write one '<type>_routines.lst' file per routine type (plus 'unknown'),
    listing each routine followed by its dependencies."""
    for typename in {'unknown'} | types:
        listing_path = os.path.join(output_dir, typename + '_routines.lst')
        with open(listing_path, 'w') as fo:
            for routine in library.allRoutinesByType(typename):
                deps = routine.dependencies()
                fo.write('%s: %s\n' % (routine.name, ' '.join(deps)))
def concatenateRoutines(routines, output_file):
with open(output_file, 'w') as output_fo:
for r in routines:
with open(r.filename, 'r') as fo:
source = fo.read()
output_fo.write(source)
class F2CError(Exception):
    """Raised when the external ``f2c`` translator exits with an error."""
    pass
def runF2C(fortran_filename, output_dir):
    """Translate *fortran_filename* to C in *output_dir* via the ``f2c`` tool.

    Raises F2CError if the f2c subprocess exits non-zero.
    """
    try:
        subprocess.check_call(
            ["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename]
        )
    except subprocess.CalledProcessError:
        raise F2CError
def scrubF2CSource(c_file):
    """Clean up f2c output in *c_file* in place and prepend the standard HEADER."""
    with open(c_file) as src:
        scrubbed = clapack_scrub.scrubSource(src.read(), verbose=True)
    with open(c_file, 'w') as dst:
        dst.write(HEADER)
        dst.write(scrubbed)
def main():
    """Drive LAPACK-lite generation: parse the wrapped-routines file, gather
    routines plus dependencies, then emit one f2c-translated C file per type."""
    if len(sys.argv) != 4:
        print(__doc__)
        return

    wrapped_routines_file = sys.argv[1]
    lapack_src_dir = sys.argv[2]
    output_dir = sys.argv[3]

    wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
    library = getLapackRoutines(wrapped_routines, ignores, lapack_src_dir)

    dumpRoutineNames(library, output_dir)

    for typename in types:
        fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename)
        c_file = fortran_file[:-2] + '.c'
        print('creating %s ...' % c_file)
        routines = library.allRoutinesByType(typename)
        concatenateRoutines(routines, fortran_file)

        # apply the patch
        patch_file = fortran_file + '.patch'
        if os.path.exists(patch_file):
            subprocess.check_call(['patch', '-u', fortran_file, patch_file])
            print("Patched {}".format(fortran_file))
        try:
            runF2C(fortran_file, output_dir)
        except F2CError:
            # Stop at the first failed translation; earlier outputs are kept.
            print('f2c failed on %s' % fortran_file)
            break
        scrubF2CSource(c_file)

        # patch any changes needed to the C file
        c_patch_file = c_file + '.patch'
        if os.path.exists(c_patch_file):
            subprocess.check_call(['patch', '-u', c_file, c_patch_file])
        print()
if __name__ == '__main__':
main()
| Ziqi-Li/bknqgis | numpy/numpy/linalg/lapack_lite/make_lite.py | Python | gpl-2.0 | 9,654 |
# #
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
EasyBuild support for installing the Intel Trace Analyzer and Collector (ITAC), implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.intelbase import IntelBase
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class EB_itac(IntelBase):
"""
Class that can be used to install itac
- tested with Intel Trace Analyzer and Collector 7.2.1.008
"""
@staticmethod
def extra_options():
    """Define the custom easyconfig parameters specific to itac."""
    itac_vars = {
        'preferredmpi': ['impi3', "Preferred MPI type", CUSTOM],
    }
    return IntelBase.extra_options(itac_vars)
def prepare_step(self, *args, **kwargs):
    """
    Custom prepare step for itac: don't require runtime license for oneAPI versions (>= 2021)
    """
    # oneAPI releases dropped the FlexLM runtime license mechanism.
    if LooseVersion(self.version) >= LooseVersion('2021'):
        kwargs['requires_runtime_license'] = False
    super(EB_itac, self).prepare_step(*args, **kwargs)
def install_step_classic(self):
    """
    Actual installation for versions prior to 2021.x:
    - for >= 8.1: delegate to the generic IntelBase silent install
    - for older versions: write a silent.cfg by hand and run install.sh
    """
    if LooseVersion(self.version) >= LooseVersion('8.1'):
        super(EB_itac, self).install_step_classic(silent_cfg_names_map=None)

        # itac v9.0.1 installer creates itac/<version> subdir, so stuff needs to be moved afterwards
        if LooseVersion(self.version) >= LooseVersion('9.0'):
            super(EB_itac, self).move_after_install()
    else:
        # silent installer configuration; the content must start at column 0
        silent = """
[itac]
INSTALLDIR=%(ins)s
LICENSEPATH=%(lic)s
INSTALLMODE=NONRPM
INSTALLUSER=NONROOT
INSTALL_ITA=YES
INSTALL_ITC=YES
DEFAULT_MPI=%(mpi)s
EULA=accept
""" % {'lic': self.license_file, 'ins': self.installdir, 'mpi': self.cfg['preferredmpi']}

        # already in correct directory
        silentcfg = os.path.join(os.getcwd(), "silent.cfg")
        # use a context manager so the file handle is closed even if the write fails
        with open(silentcfg, 'w') as fh:
            fh.write(silent)
        self.log.debug("Contents of %s: %s" % (silentcfg, silent))

        # the installer needs a scratch directory it can write to
        tmpdir = os.path.join(os.getcwd(), self.version, 'mytmpdir')
        try:
            os.makedirs(tmpdir)
        except OSError as err:
            raise EasyBuildError("Directory %s can't be created: %s", tmpdir, err)

        cmd = "./install.sh --tmp-dir=%s --silent=%s" % (tmpdir, silentcfg)
        run_cmd(cmd, log_all=True, simple=True)
def install_step_oneapi(self, *args, **kwargs):
"""
Actual installation for versions 2021.x onwards.
"""
# require that EULA is accepted
intel_eula_url = 'https://software.intel.com/content/www/us/en/develop/articles/end-user-license-agreement.html'
self.check_accepted_eula(name='Intel-oneAPI', more_info=intel_eula_url)
# exactly one "source" file is expected: the (offline) installation script
if len(self.src) == 1:
install_script = self.src[0]['name']
else:
src_fns = ', '.join([x['name'] for x in self.src])
raise EasyBuildError("Expected to find exactly one 'source' file (installation script): %s", src_fns)
cmd = ' '.join([
"sh %s" % install_script,
'-a',
'-s',
"--eula accept",
"--install-dir=%s" % self.installdir,
])
run_cmd(cmd, log_all=True, simple=True)
# itac installer create itac/<version> subdir, so stuff needs to be moved afterwards
super(EB_itac, self).move_after_install()
def sanity_check_step(self):
"""Custom sanity check paths for ITAC."""
custom_paths = {
'files': ["include/%s" % x for x in ["i_malloc.h", "VT_dynamic.h", "VT.h", "VT.inc"]],
'dirs': ["bin", "lib", "slib"],
}
super(EB_itac, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""
A dictionary of possible directories to look for
"""
guesses = {}
if LooseVersion(self.version) < LooseVersion('9.0'):
preferredmpi = self.cfg["preferredmpi"]
guesses.update({
'MANPATH': ['man'],
'CLASSPATH': ['itac/lib_%s' % preferredmpi],
'VT_LIB_DIR': ['itac/lib_%s' % preferredmpi],
'VT_SLIB_DIR': ['itac/lib_s%s' % preferredmpi]
})
if self.cfg['m32']:
guesses.update({
'PATH': ['bin', 'bin/ia32', 'ia32/bin'],
'LD_LIBRARY_PATH': ['lib', 'lib/ia32', 'ia32/lib'],
})
else:
guesses.update({
'PATH': ['bin', 'bin/intel64', 'bin64'],
'LD_LIBRARY_PATH': ['lib', 'lib/intel64', 'lib64', 'slib'],
})
return guesses
def make_module_extra(self):
"""Overwritten from IntelBase to add extra txt"""
txt = super(EB_itac, self).make_module_extra()
txt += self.module_generator.set_environment('VT_ROOT', self.installdir)
txt += self.module_generator.set_environment('VT_MPI', self.cfg['preferredmpi'])
txt += self.module_generator.set_environment('VT_ADD_LIBS', "-ldwarf -lelf -lvtunwind -lnsl -lm -ldl -lpthread")
txt += self.module_generator.set_environment('VT_LIB_DIR', self.installdir + "/lib")
txt += self.module_generator.set_environment('VT_SLIB_DIR', self.installdir + "/slib")
return txt
| boegel/easybuild-easyblocks | easybuild/easyblocks/i/itac.py | Python | gpl-2.0 | 6,842 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; keep byte-stable so it
    # matches the migration state recorded in the database.
    dependencies = [
        ('waifufmapp', '0007_auto_20170516_1042'),
    ]
    operations = [
        migrations.AlterField(
            model_name='albumreview',
            name='date',
            # NOTE(review): this fixed 2017 timestamp was frozen in at
            # generation time, which usually means the model default was a
            # *call result* (e.g. datetime.now()) instead of a callable.
            # Confirm the model uses django.utils.timezone.now so new rows
            # don't all get this date.
            field=models.DateField(default=datetime.datetime(2017, 5, 24, 17, 34, 15, 96531, tzinfo=utc)),
        ),
        # Drops the unique_together constraint on albumreview entirely.
        migrations.AlterUniqueTogether(
            name='albumreview',
            unique_together=set([]),
        ),
    ]
| Adria331/WaifuFM | waifufmapp/migrations/0008_auto_20170524_1734.py | Python | gpl-3.0 | 638 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
from odoo import _
from odoo import http
from odoo.http import request
class Twitter(http.Controller):
    """JSON endpoints backing the website "Twitter favorites" snippet."""
    @http.route(['/twitter_reload'], type='json', auth="user", website=True)
    def twitter_reload(self):
        # Force a re-fetch of the favorite tweets for the current website.
        return request.website.fetch_favorite_tweets()
    @http.route(['/get_favorites'], type='json', auth="public", website=True)
    def get_tweets(self, limit=20):
        """Return up to *limit* cached favorite tweets as parsed JSON.
        Website publishers get an explanatory ``{"error": ...}`` dict when
        the integration is misconfigured; other visitors get an empty list.
        """
        website = request.website
        is_publisher = request.env['res.users'].has_group('website.group_website_publisher')
        if not (website.twitter_api_key and website.twitter_api_secret):
            if is_publisher:
                return {"error": _("Please set the Twitter API Key and Secret in the Website Settings.")}
            return []
        screen_name = website.twitter_screen_name
        if not screen_name:
            if is_publisher:
                return {"error": _("Please set a Twitter screen name to load favorites from, "
                                   "in the Website Settings (it does not have to be yours)")}
            return []
        tweets = request.env['website.twitter.tweet'].search(
            [('website_id', '=', website.id),
             ('screen_name', '=', screen_name)],
            limit=int(limit), order="tweet_id desc")
        # The snippet needs a minimum number of tweets to render properly.
        if len(tweets) >= 12:
            return [json.loads(record.tweet) for record in tweets]
        if is_publisher:
            return {"error": _("Twitter user @%(username)s has less than 12 favorite tweets. "
                               "Please add more or choose a different screen name.")
                    % {'username': screen_name}}
        return []
| chienlieu2017/it_management | odoo/addons/website_twitter/controllers/main.py | Python | gpl-3.0 | 1,861 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Datetools provide a method of manipulating and working dates and times.
# Copyright (C) 2013-2018 Chris Caron <lead2gold@gmail.com>
#
# This file is part of Datetools. Datetools is free software; you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This is just a simple tool for testing that the output is as expected
"""
from datetime import datetime
from dateblock import dateblock
from datetime import date
from datetime import time
# Support python datetime object
print dateblock("*/1", ref=datetime(2000, 5, 3, 10, 10, 0))
# Support python date object
print dateblock("*/1", ref=date(2000, 5, 3))
# Support python time object
print dateblock("*/1", ref=time(20, 5, 3), block=False)
# Time equals 'now'
print dateblock("*/1", ref=None, block=False)
# Epoch Time
print dateblock("*/1", ref=7999434323, block=False)
# Drift Time
print dateblock("*/10 +5", ref=7999434323, block=False)
# Blocking should never be possible if the time is in the past
print dateblock("*/10 +7", ref=999434323, block=True)
# Drifting inline
print dateblock("*/10 +5", ref=date(2000, 1, 1), block=False)
# Drifting inline (with option specified, inline should over-ride)
# NOTE(review): the case described by the comment above has no statement
# under it — it looks like its print line was lost; confirm against history.
# Drifting without use of +
print dateblock("*/10 * * * * * 5", ref=date(2000, 2, 1), block=False)
# Drifting with multiple options specified
print dateblock("* 10 +5,8", ref=date(2000, 3, 1), block=False)
| caronc/datetools | src/pytest.py | Python | gpl-2.0 | 2,031 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2013 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
def get_creation_date(recid):
    """Return the creation date of the record with the given record ID.

    @param recid: record identifier (primary key of the Bibrec table)
    @return: creation date of the record
    """
    # Imported lazily, as in the surrounding record-extension functions.
    from invenio.modules.records.models import Record as Bibrec
    record = Bibrec.query.get(recid)
    return record.creation_date
| hachreak/invenio-demosite | invenio_demosite/base/recordext/functions/get_creation_date.py | Python | gpl-2.0 | 1,051 |
# *******************
# *** ANCHORS ***
# *******************
# Each anchor function takes two rect: a parent and a child.
# The child is arranged relative to the parent.
def northwest(par,chi):
    """Anchor: align the child rect's top-left corner with the parent's."""
    chi.topleft = par.topleft
def north( par,chi ):
    """Anchor: center the child on the parent's top edge."""
    chi.midtop = par.midtop
def northeast(par,chi):
    """Anchor: align the child rect's top-right corner with the parent's."""
    chi.topright = par.topright
def west(par,chi):
    """Anchor: center the child on the parent's left edge."""
    chi.midleft = par.midleft
def middle( par,chi):
    """Anchor: center the child on the parent's center."""
    chi.center = par.center
def east(par,chi):
    """Anchor: center the child on the parent's right edge."""
    chi.midright = par.midright
def southwest(par,chi):
    """Anchor: align the child rect's bottom-left corner with the parent's."""
    chi.bottomleft = par.bottomleft
def south( par,chi ):
    """Anchor: center the child on the parent's bottom edge."""
    chi.midbottom = par.midbottom
def southeast(par,chi):
    """Anchor: align the child rect's bottom-right corner with the parent's."""
    chi.bottomright = par.bottomright
# The eight edge/corner anchors (everything except middle).
EDGES = (west,northwest,north,northeast,east,southeast,south,southwest)
# (anchor, opposite anchor) pairs for the four cardinal directions.
OPPOSING_CARDINALS = ((north,south),(east,west),(south,north),(west,east))
# (anchor, opposite anchor) pairs for all eight edge/corner anchors.
OPPOSING_PAIRS = ((northwest,southeast), (north,south), (northeast,southwest),
    (west,east), (east,west), (southwest,northeast), (south,north),
    (southeast, northwest))
| jwvhewitt/dmeternal | old_game/randmaps/anchors.py | Python | gpl-2.0 | 1,001 |
# pyOCD debugger
# Copyright (c) 2017-2021 Maxim Integrated (Part of Analog Devices)
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
# Flash programming algorithm description for the MAX32660 internal flash,
# consumed by pyOCD's FlashRegion below.  The instruction words are a
# generated machine-code blob — do not hand-edit.
FLASH_ALGO = {
    'load_address' : 0x20000000,
    # Flash algorithm as a hex string
    'instructions': [
    0xe00abe00,
    0x20604989, 0x6a4a6048, 0x44484888, 0x22006082, 0x688a624a, 0x68426042, 0x4270f022, 0x68426042,
    0x5200f042, 0x68406042, 0x47706088, 0x688a497e, 0x4448487e, 0x68426042, 0x4270f022, 0x68426042,
    0x6880608a, 0x47706248, 0x47702000, 0x47702000, 0xf7ffb500, 0x4b74ffd5, 0x48746899, 0x60414448,
    0xf4216841, 0x6041417f, 0xf4416841, 0x6041412a, 0x60996841, 0xf0416841, 0x60410102, 0x60986840,
    0x01c06898, 0xf7ffd4fc, 0x6a58ffd1, 0xf04f0780, 0xd5010000, 0x20016258, 0xb500bd00, 0x035b0b43,
    0xffaef7ff, 0x600b4960, 0x4860688a, 0x60424448, 0xf4226842, 0x6042427f, 0xf4426842, 0x604242aa,
    0x608a6842, 0xf0426842, 0x60420204, 0x60886840, 0x01c06888, 0x6a48d4fc, 0xd5050780, 0x62482000,
    0xffa4f7ff, 0xbd002001, 0xffa0f7ff, 0xbd002000, 0x4613b5f8, 0x4605460c, 0xff82f7ff, 0x6881484a,
    0x444a4a4a, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041, 0x68516051, 0xe00e6081, 0x68196005,
    0x68516301, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x1f241d1b, 0x2c041d2d, 0x06e9d301,
    0x6811d1ec, 0xd1202980, 0xd31e2c10, 0x60516881, 0xf0216851, 0x60510110, 0x60816851, 0x68196005,
    0x68596301, 0x68996341, 0x68d96381, 0x685163c1, 0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9,
    0x3c103310, 0x2c103510, 0x2c04d2e8, 0x6881d31c, 0x68516051, 0x6100f021, 0x68516051, 0x0110f041,
    0x68516051, 0x60056081, 0x63016819, 0xf0416851, 0x60510101, 0x60816851, 0x01c96881, 0x1d1bd4fc,
    0x1d2d1f24, 0xd2ee2c04, 0xa119b314, 0x91006809, 0x21006886, 0x68566056, 0x6600f026, 0x68566056,
    0x0610f046, 0x68566056, 0x466e6086, 0x7b01f813, 0x1c495477, 0xd1f91e64, 0x99006005, 0x68516301,
    0x0101f041, 0x68516051, 0x68816081, 0xd4fc01c9, 0x07806a40, 0xf7ffd503, 0x2001ff09, 0xf7ffbdf8,
    0x2000ff05, 0x0000bdf8, 0x40029000, 0x00000004, 0xffffffff, 0x00000000, 0x00000020, 0x00000000,
    0x00000000
    ],
    # Relative function addresses
    'pc_init': 0x2000004d,
    'pc_unInit': 0x20000051,
    'pc_program_page': 0x200000f5,
    'pc_erase_sector': 0x2000009f,
    'pc_eraseAll': 0x20000055,
    'static_base' : 0x20000000 + 0x00000004 + 0x00000234,
    'begin_stack' : 0x20000448,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x400,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20001400], # Enable double buffering
    'min_program_length' : 0x400,
    # Relative region addresses and sizes
    'ro_start': 0x0,
    'ro_size': 0x234,
    'rw_start': 0x234,
    'rw_size': 0x10,
    'zi_start': 0x244,
    'zi_size': 0x0,
    # Flash information
    'flash_start': 0x0,
    'flash_size': 0x40000,
    'sector_sizes': (
        (0x0, 0x2000),
    )
}
class MAX32660(CoreSightTarget):
    """CoreSight target definition for the Maxim MAX32660 MCU."""
    VENDOR = "Maxim"
    MEMORY_MAP = MemoryMap(
        # 256 KiB boot flash in 8 KiB sectors, programmed via FLASH_ALGO.
        FlashRegion( start=0, length=0x40000, blocksize=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
        # 96 KiB of SRAM.
        RamRegion( start=0x20000000, length=0x18000),
        )
    def __init__(self, session):
        super().__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("max32660.svd")
| pyocd/pyOCD | pyocd/target/builtin/target_MAX32660.py | Python | apache-2.0 | 4,036 |
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import res_config_settings
from . import sale_global_discount_wizard
from . import sale_advance_payment_inv
| ingadhoc/sale | sale_ux/wizards/__init__.py | Python | agpl-3.0 | 362 |
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoader
class TyresCoUk(BaseSpider):
    """Spider for www.4x4tyres.co.uk: walks category and subcategory pages
    and scrapes each product detail page into a Product item.
    (No pagination handling — listing pages are crawled as-is.)
    """
    name = '4x4tyres.co.uk'
    allowed_domains = ['4x4tyres.co.uk', 'www.4x4tyres.co.uk']
    start_urls = ('http://www.4x4tyres.co.uk',)
    def parse(self, response):
        if not isinstance(response, HtmlResponse):
            return
        selector = HtmlXPathSelector(response)
        # Follow top-level category links.
        for link in selector.select(u'//div[@id="categoryTab"]//a/@href').extract():
            yield Request(link)
        # Follow subcategory links.
        for link in selector.select(u'//div[@id="thecategories"]//a/@href').extract():
            yield Request(link)
        # Hand product detail pages off to parse_product.
        for link in selector.select(u'//div[@class="listingBox"]//div[@class="headergrid"]//a/@href').extract():
            yield Request(link, callback=self.parse_product)
    def parse_product(self, response):
        """Extract a single Product item from a product detail page."""
        if not isinstance(response, HtmlResponse):
            return
        loader = ProductLoader(item=Product(), response=response)
        loader.add_xpath('name', u'//form/div[not(@class)]/h1[not(@class)]/text()')
        loader.add_value('url', response.url)
        # Price is prefixed with a pound sign (\xa3) on the page.
        loader.add_xpath('price', u'//form//div[@class="contentText"]//div[@class="PriceList"]/div[@class="pricenow"]/text()', re=u'\xa3(.*)')
        loader.add_xpath('sku', u'//td[@class="ProductPageSummaryTableInfo" and preceding-sibling::td[@class="ProductPageSummaryTable" and contains(text(),"Model Number")]]/text()')
        yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/topgeartrading/4x4tyrescouk.py | Python | apache-2.0 | 2,085 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright © 2010, RedJack, LLC.
# All rights reserved.
#
# Please see the LICENSE.txt file in this distribution for license
# details.
# ----------------------------------------------------------------------
import unittest
from ipset import *
from ipset.tests import *
# 192.168.1.100 as four packed big-endian bytes.
IPV4_ADDR_1 = \
"\xc0\xa8\x01\x64"
# fe80::21e:c2ff:fe9f:e8e1 as sixteen packed bytes.
IPV6_ADDR_1 = \
"\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x1e\xc2\xff\xfe\x9f\xe8\xe1"
class TestSet(unittest.TestCase):
    """Tests for IPSet construction, insertion, and file loading."""
    def test_set_starts_empty(self):
        s = IPSet()
        self.assertFalse(s)
    def test_empty_sets_equal(self):
        s1 = IPSet()
        s2 = IPSet()
        self.assertEqual(s1, s2)
    def test_ipv4_insert(self):
        s = IPSet()
        s.add(IPV4_ADDR_1)
        # assertTrue replaces the deprecated assert_ alias (removed in
        # Python 3.12).
        self.assertTrue(s)
    def test_ipv4_insert_network(self):
        s = IPSet()
        s.add_network(IPV4_ADDR_1, 24)
        self.assertTrue(s)
    def test_ipv6_insert(self):
        s = IPSet()
        s.add(IPV6_ADDR_1)
        self.assertTrue(s)
    def test_ipv6_insert_network(self):
        s = IPSet()
        s.add_network(IPV6_ADDR_1, 32)
        self.assertTrue(s)
    def test_file_empty(self):
        s1 = IPSet.from_file(test_file("empty.set"))
        s2 = IPSet()
        self.assertEqual(s1, s2)
    def test_file_just1_v4(self):
        s1 = IPSet.from_file(test_file("just1-v4.set"))
        s2 = IPSet()
        s2.add(IPV4_ADDR_1)
        self.assertEqual(s1, s2)
    def test_file_just1_v6(self):
        s1 = IPSet.from_file(test_file("just1-v6.set"))
        s2 = IPSet()
        s2.add(IPV6_ADDR_1)
        self.assertEqual(s1, s2)
class TestMap(unittest.TestCase):
    """Tests for IPMap construction, insertion, and file loading."""
    def test_map_starts_empty(self):
        s = IPMap(0)
        self.assertFalse(s)
    def test_empty_maps_equal(self):
        s1 = IPMap(0)
        s2 = IPMap(0)
        self.assertEqual(s1, s2)
    def test_ipv4_insert(self):
        s = IPMap(0)
        s[IPV4_ADDR_1] = 1
        # assertTrue replaces the deprecated assert_ alias (removed in
        # Python 3.12).
        self.assertTrue(s)
    def test_ipv4_insert_network(self):
        s = IPMap(0)
        s.set_network(IPV4_ADDR_1, 24, 1)
        self.assertTrue(s)
    def test_ipv6_insert(self):
        s = IPMap(0)
        s[IPV6_ADDR_1] = 1
        self.assertTrue(s)
    def test_ipv6_insert_network(self):
        s = IPMap(0)
        s.set_network(IPV6_ADDR_1, 32, 1)
        self.assertTrue(s)
    def test_file_empty(self):
        s1 = IPMap.from_file(test_file("empty.map"))
        s2 = IPMap(0)
        self.assertEqual(s1, s2)
    def test_file_just1_v4(self):
        s1 = IPMap.from_file(test_file("just1-v4.map"))
        s2 = IPMap(0)
        s2[IPV4_ADDR_1] = 1
        self.assertEqual(s1, s2)
    def test_file_just1_v6(self):
        s1 = IPMap.from_file(test_file("just1-v6.map"))
        s2 = IPMap(0)
        s2[IPV6_ADDR_1] = 1
        self.assertEqual(s1, s2)
| Jigsaw-Code/outline-client | third_party/shadowsocks-libev/libipset/python/src/ipset/tests/obj.py | Python | apache-2.0 | 2,867 |
"""
Copyright (c) 2017 James Patrick Dill, reshanie
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import re
import string
from dateutil.parser import parse as datetimeparse
r_date = re.compile(r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).(\d{0,3})Z", re.I)
def get_datetime(s: str) -> datetime.datetime:
    """Parse a date string into a ``datetime``.

    Tries a general parse (dateutil) first; on failure falls back to the
    "Jul 4, 2017 | 4:12 PM" display format.
    """
    try:
        return datetimeparse(s)
    except ValueError:
        return datetime.datetime.strptime(s, "%b %d, %Y | %I:%M %p")  # ex: Jul 4, 2017 | 4:12 PM
def matches(o, attrs):
    """Return True if *o* has every attribute named in *attrs* with an
    equal value, False otherwise."""
    return all(
        hasattr(o, name) and getattr(o, name) == value
        for name, value in attrs.items()
    )
def get(iterator, **kwargs):
    """Finds an object in the given iterator with matching attributes to \*\*kwargs

    :keyword kwargs: Attributes to match
    :returns: Object found or None
    """
    for candidate in iterator:
        if all(hasattr(candidate, name) and getattr(candidate, name) == value
               for name, value in kwargs.items()):
            return candidate
    return None
number_re = re.compile(r"([\d\.]+)")
def numbers(s):
    """Return all numeric tokens found in *s* as a list of floats.

    Tokens are runs of digits/dots; runs that are not valid floats
    (e.g. "." or "1.2.3") are skipped instead of raising ValueError,
    which the previous implementation did.
    """
    numbers_ = []
    for token in re.findall(r"([\d\.]+)", s):
        try:
            numbers_.append(float(token))
        except ValueError:
            # matched by the pattern but not a parseable float
            continue
    return numbers_
def robux_parse(st):
    """Parse a Roblox-formatted ROBUX amount string into an int.

    Supports "k" (thousands) and "m" (millions) suffixes.

    Example::

        >>> robux_parse("93.6k")
        93600
        >>> robux_parse("21m")
        21000000
        >>> robux_parse("50")
        50

    :param str st: String to parse
    :returns: int
    """
    lowered = st.lower()
    # Keep only the digits and decimal point, then scale by the suffix.
    amount = float("".join(c for c in lowered if c in string.digits or c == "."))
    if lowered[-1] == "k":
        amount *= 1000
    elif lowered[-1] == "m":
        amount *= 1000000
    return int(amount)
| reshanie/roblox.py | roblox/utils.py | Python | mit | 2,717 |
import time
import datetime
from django.test import TestCase
from django.core.exceptions import ValidationError
from unittest import skip
from invoices.models import Invoice, Line_item, Currency, Vendor, Customer, Tax
from .utils import create_new_invoice, create_new_line_item, create_new_vendor, create_new_customer, create_new_tax, create_new_currency
class CreateAndRetrieveModelObjects(TestCase):
    """Each model object created via the test factories can be saved and
    retrieved again from the database."""
    def test_creating_invoice(self):
        new_invoice = create_new_invoice()
        saved_invoice = Invoice.objects.first()
        # NOTE(review): this test compares .id while the others compare
        # whole objects — presumably equivalent; confirm Invoice equality.
        self.assertEqual(new_invoice.id, saved_invoice.id)
    def test_creating_currency(self):
        invoice_ = create_new_invoice()
        new_currency = create_new_currency(invoice=invoice_)
        saved_currency = Currency.objects.first()
        self.assertEqual(new_currency, saved_currency)
    def test_creating_tax(self):
        invoice_ = create_new_invoice()
        new_tax = create_new_tax(invoice=invoice_)
        saved_tax = Tax.objects.first()
        self.assertEqual(new_tax, saved_tax)
    def test_creating_vendor(self):
        new_vendor = create_new_vendor()
        saved_vendor = Vendor.objects.first()
        self.assertEqual(new_vendor, saved_vendor)
    def test_creating_customer(self):
        new_customer = create_new_customer()
        saved_customer = Customer.objects.first()
        self.assertEqual(new_customer, saved_customer)
    def test_creating_line_item(self):
        invoice_ = create_new_invoice()
        new_line_item = create_new_line_item(invoice=invoice_)
        saved_line_item = Line_item.objects.first()
        self.assertEqual(new_line_item, saved_line_item)
class UpdateModelObjects(TestCase):
    """Saved model objects can be modified and re-saved, and an invoice can
    own multiple taxes and line items."""
    def test_updating_invoice(self):
        new_invoice = create_new_invoice()
        saved_invoice = Invoice.objects.first()
        saved_invoice.invoice_number = '5555'
        saved_invoice.invoice_date = datetime.date(2014, 1, 1)
        saved_invoice.invoice_comment = 'New comment'
        saved_invoice.save()
        # Updating must not create a second row.
        self.assertEqual(Invoice.objects.count(), 1)
        updated_invoice = Invoice.objects.first()
        self.assertEqual(updated_invoice.invoice_number, '5555')
        self.assertEqual(updated_invoice.invoice_date, datetime.date(2014, 1, 1))
        self.assertEqual(updated_invoice.invoice_comment, 'New comment')
    def test_updating_currency(self):
        invoice_ = create_new_invoice()
        new_currency = create_new_currency(invoice=invoice_)
        saved_currency = Currency.objects.first()
        saved_currency.currency_name = 'CAD'
        saved_currency.currency_symbol = '&'
        saved_currency.save()
        self.assertEqual(Currency.objects.count(), 1)
        updated_currency = Currency.objects.first()
        self.assertEqual(updated_currency.currency_name, 'CAD')
        self.assertEqual(updated_currency.currency_symbol, '&')
    def test_updating_tax(self):
        invoice_ = create_new_invoice()
        new_tax = create_new_tax(invoice=invoice_)
        saved_tax = Tax.objects.first()
        saved_tax.tax_rate = 25
        saved_tax.tax_name = 'HST'
        saved_tax.save()
        self.assertEqual(Tax.objects.count(), 1)
        updated_tax = Tax.objects.first()
        self.assertEqual(updated_tax.tax_rate, 25)
        self.assertEqual(updated_tax.tax_name, 'HST')
    def test_updating_vendor(self):
        new_vendor = create_new_vendor()
        saved_vendor = Vendor.objects.first()
        saved_vendor.vendor_name = 'New name'
        saved_vendor.vendor_street_address = 'New street address'
        saved_vendor.vendor_city = 'New City'
        saved_vendor.vendor_state = 'NS'
        saved_vendor.vendor_post_code = 'New code'
        saved_vendor.vendor_phone_number = '555 555 5555'
        saved_vendor.vendor_email_address = 'new@email.com'
        saved_vendor.save()
        self.assertEqual(Vendor.objects.count(), 1)
        updated_vendor = Vendor.objects.first()
        self.assertEqual(updated_vendor.vendor_name, 'New name')
        self.assertEqual(updated_vendor.vendor_street_address, 'New street address')
        self.assertEqual(updated_vendor.vendor_city, 'New City')
        self.assertEqual(updated_vendor.vendor_state, 'NS')
        self.assertEqual(updated_vendor.vendor_post_code, 'New code')
        self.assertEqual(updated_vendor.vendor_phone_number, '555 555 5555')
        self.assertEqual(updated_vendor.vendor_email_address, 'new@email.com')
    def test_updating_customer(self):
        new_customer = create_new_customer()
        saved_customer = Customer.objects.first()
        saved_customer.customer_name = 'New name'
        saved_customer.customer_street_address = 'New street address'
        saved_customer.customer_city = 'New City'
        saved_customer.customer_state = 'NS'
        saved_customer.customer_post_code = 'New code'
        saved_customer.customer_phone_number = '555 555 5555'
        saved_customer.customer_email_address = 'new@email.com'
        saved_customer.save()
        self.assertEqual(Customer.objects.count(), 1)
        updated_customer = Customer.objects.first()
        self.assertEqual(updated_customer.customer_name, 'New name')
        self.assertEqual(updated_customer.customer_street_address, 'New street address')
        self.assertEqual(updated_customer.customer_city, 'New City')
        self.assertEqual(updated_customer.customer_state, 'NS')
        self.assertEqual(updated_customer.customer_post_code, 'New code')
        self.assertEqual(updated_customer.customer_phone_number, '555 555 5555')
        self.assertEqual(updated_customer.customer_email_address, 'new@email.com')
    def test_updating_line_item(self):
        invoice_ = create_new_invoice()
        new_line_item = create_new_line_item(invoice=invoice_)
        saved_line_item = Line_item.objects.first()
        saved_line_item.line_item = 'New item'
        saved_line_item.line_item_description = 'Description of line item'
        saved_line_item.line_item_price = 300
        saved_line_item.line_item_quantity = 10
        saved_line_item.save()
        self.assertEqual(Line_item.objects.count(), 1)
        updated_line_item = Line_item.objects.first()
        self.assertEqual(updated_line_item.line_item, 'New item')
        self.assertEqual(updated_line_item.line_item_description, 'Description of line item')
        self.assertEqual(updated_line_item.line_item_price, 300)
        self.assertEqual(updated_line_item.line_item_quantity, 10)
    def test_invoice_can_have_multiple_taxes(self):
        invoice_ = create_new_invoice()
        first_new_tax = create_new_tax(invoice=invoice_)
        second_new_tax = create_new_tax(invoice=invoice_)
        self.assertEqual(Tax.objects.filter(invoice=invoice_).count(), 2)
        self.assertNotEqual(first_new_tax, second_new_tax)
    def test_invoice_can_have_multiple_line_items(self):
        invoice_ = create_new_invoice()
        first_new_line_item = create_new_line_item(invoice=invoice_)
        second_new_line_item = create_new_line_item(invoice=invoice_)
        self.assertEqual(Line_item.objects.filter(invoice=invoice_).count(), 2)
        self.assertNotEqual(first_new_line_item, second_new_line_item)
class DeleteModelObjects(TestCase):
    """Deleting invoices and their related objects behaves as expected,
    including cascade deletion of dependent rows."""
    def test_delete_invoice(self):
        ''' If an invoice is deleted all associated currency, tax, customer
        and line item objects should be deleted
        as well or they'll become orphans '''
        invoice_ = create_new_invoice()
        new_currency = create_new_currency(invoice=invoice_)
        first_new_tax = create_new_tax(invoice=invoice_)
        new_line_item = create_new_line_item(invoice=invoice_)
        new_customer = create_new_customer()
        new_customer.invoice = invoice_
        new_customer.save()
        invoice_.delete()
        self.assertEqual(Currency.objects.count(), 0)
        self.assertEqual(Tax.objects.count(), 0)
        self.assertEqual(Line_item.objects.count(), 0)
        self.assertEqual(Customer.objects.count(), 0)
    def test_delete_currency(self):
        invoice_ = create_new_invoice()
        new_currency = create_new_currency(invoice=invoice_)
        Currency.objects.filter(invoice=invoice_).delete()
        self.assertEqual(Currency.objects.count(), 0)
    def test_delete_all_taxes_for_an_invoice(self):
        invoice_ = create_new_invoice()
        first_tax = create_new_tax(invoice=invoice_)
        second_tax = create_new_tax(invoice=invoice_)
        Tax.objects.filter(invoice=invoice_).delete()
        self.assertEqual(Tax.objects.count(), 0)
    def test_delete_individual_tax(self):
        invoice_ = create_new_invoice()
        first_tax = create_new_tax(invoice=invoice_)
        second_tax = create_new_tax(invoice=invoice_)
        # Deleting one tax must leave the invoice's other tax intact.
        Tax.objects.filter(id=first_tax.id).delete()
        self.assertEqual(Tax.objects.filter(invoice=invoice_).count(), 1)
    def test_delete_vendor(self):
        invoice_ = create_new_invoice()
        new_vendor = create_new_vendor()
        new_vendor.invoice = invoice_
        new_vendor.save()
        Vendor.objects.filter(invoice=invoice_).delete()
        self.assertEqual(Vendor.objects.count(), 0)
    def test_delete_customer(self):
        invoice_ = create_new_invoice()
        new_customer = create_new_customer()
        new_customer.invoice = invoice_
        new_customer.save()
        Customer.objects.filter(invoice=invoice_).delete()
        self.assertEqual(Customer.objects.count(), 0)
    def test_delete_all_line_items_for_an_invoice(self):
        invoice_ = create_new_invoice()
        first_line_item = create_new_line_item(invoice=invoice_)
        second_line_item = create_new_line_item(invoice=invoice_)
        Line_item.objects.filter(invoice=invoice_).delete()
        self.assertEqual(Line_item.objects.count(), 0)
    def test_delete_individual_line_item(self):
        invoice_ = create_new_invoice()
        first_line_item = create_new_line_item(invoice=invoice_)
        second_line_item = create_new_line_item(invoice=invoice_)
        Line_item.objects.filter(id=first_line_item.id).delete()
        self.assertEqual(Line_item.objects.count(), 1)
class CalculatingTaxAndTotalsInTheModel(TestCase):
    """Model-level arithmetic: line totals, net total, tax total and total
    payable.  (Factory defaults: each line item totals 100, each tax
    contributes 15 per 200 net — see utils factories; TODO confirm.)"""
    def test_line_item_totals_are_calculated_correctly(self):
        invoice_ = create_new_invoice()
        new_line_item = create_new_line_item(invoice=invoice_)
        self.assertEqual(new_line_item.get_line_item_total(), 100)
    def test_net_total_for_an_invoice_is_calculated_correctly(self):
        invoice_ = create_new_invoice()
        first_line_item = create_new_line_item(invoice=invoice_)
        second_line_item = create_new_line_item(invoice=invoice_)
        self.assertEqual(invoice_.get_net_total(), 200)
    def test_tax_is_correctly_calculated(self):
        invoice_ = create_new_invoice()
        tax = create_new_tax(invoice=invoice_)
        new_line_item = create_new_line_item(invoice=invoice_)
        self.assertEqual(invoice_.get_tax_total(), 15)
    def test_total_payable_is_calculated_correctly(self):
        invoice_ = create_new_invoice()
        first_tax = create_new_tax(invoice=invoice_)
        second_tax = create_new_tax(invoice=invoice_)
        first_line_item = create_new_line_item(invoice=invoice_)
        second_line_item = create_new_line_item(invoice=invoice_)
        # 200 net + two taxes on it = 260 payable.
        self.assertEqual(invoice_.get_total_payable(), 260)
| bjdixon/invoice4django | invoices/tests/test_models.py | Python | mit | 10,579 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for TestRunResults."""
from __future__ import absolute_import
import unittest
from pylib.base.base_test_result import BaseTestResult
from pylib.base.base_test_result import TestRunResults
from pylib.base.base_test_result import ResultType
class TestTestRunResults(unittest.TestCase):
  """Unit tests for TestRunResults aggregation and report formatting."""

  def setUp(self):
    self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1')
    duplicate_p1 = BaseTestResult('p1', ResultType.PASS)
    self.p2 = BaseTestResult('p2', ResultType.PASS)
    self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1')
    self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1')
    self.u1 = BaseTestResult('u1', ResultType.UNKNOWN)
    self.tr = TestRunResults()
    self.tr.AddResult(self.p1)
    self.tr.AddResult(duplicate_p1)
    self.tr.AddResult(self.p2)
    self.tr.AddResults({self.f1, self.c1, self.u1})

  def testGetAll(self):
    expected = {self.p1, self.p2, self.f1, self.c1, self.u1}
    self.assertFalse(self.tr.GetAll().symmetric_difference(expected))

  def testGetPass(self):
    expected = {self.p1, self.p2}
    self.assertFalse(self.tr.GetPass().symmetric_difference(expected))

  def testGetNotPass(self):
    expected = {self.f1, self.c1, self.u1}
    self.assertFalse(self.tr.GetNotPass().symmetric_difference(expected))

  def testGetAddTestRunResults(self):
    merged = TestRunResults()
    extra_p1 = BaseTestResult('p1', ResultType.PASS)
    f2 = BaseTestResult('f2', ResultType.FAIL)
    merged.AddResult(extra_p1)
    merged.AddResult(f2)
    merged.AddTestRunResults(self.tr)
    expected = {self.p1, self.p2, self.f1, self.c1, self.u1, f2}
    self.assertFalse(merged.GetAll().symmetric_difference(expected))

  def testGetLogs(self):
    expected = ('[FAIL] f1:\n'
                'failure1\n'
                '[CRASH] c1:\n'
                'crash1')
    self.assertEqual(self.tr.GetLogs(), expected)

  def testGetShortForm(self):
    expected = ('ALL: 5         PASS: 2        FAIL: 1        '
                'CRASH: 1       TIMEOUT: 0     UNKNOWN: 1     ')
    self.assertEqual(self.tr.GetShortForm(), expected)

  def testGetGtestForm(self):
    expected = ('[==========] 5 tests ran.\n'
                '[  PASSED  ] 2 tests.\n'
                '[  FAILED  ] 3 tests, listed below:\n'
                '[  FAILED  ] f1\n'
                '[  FAILED  ] c1 (CRASHED)\n'
                '[  FAILED  ] u1 (UNKNOWN)\n'
                '\n'
                '3 FAILED TESTS')
    self.assertEqual(expected, self.tr.GetGtestForm())

  def testRunPassed(self):
    self.assertFalse(self.tr.DidRunPass())
    empty_run = TestRunResults()
    self.assertTrue(empty_run.DidRunPass())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| youtube/cobalt | build/android/pylib/base/base_test_result_unittest.py | Python | bsd-3-clause | 2,856 |
""".. Ignore pydocstyle D400.
===============
Local Connector
===============
"""
import logging
import subprocess
from resolwe.flow.models import Data
from resolwe.storage import settings as storage_settings
from resolwe.utils import BraceMessage as __
from .base import BaseConnector
logger = logging.getLogger(__name__)
class Connector(BaseConnector):
    """Workload connector that executes jobs on the local machine."""

    def submit(self, data: Data, argv):
        """Spawn the given command locally and wait for it to finish.

        For details, see
        :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
        """
        logger.debug(
            __(
                "Connector '{}' running for Data with id {} ({}).",
                self.__class__.__module__,
                data.id,
                repr(argv),
            )
        )
        working_directory = storage_settings.FLOW_VOLUMES["runtime"]["config"]["path"]
        process = subprocess.Popen(
            argv, cwd=working_directory, stdin=subprocess.DEVNULL
        )
        process.wait()

    def cleanup(self, data_id: int):
        """Cleanup."""
| genialis/resolwe | resolwe/flow/managers/workload_connectors/local.py | Python | apache-2.0 | 1,053 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_mapper.py
#
"""Tests for mapper"""
import unittest
import csv
import collections
from mapper import Mapper
from utils import Utils
from utils_for_tests import UtilsForTests
import timeit
class MapperTests(unittest.TestCase):
    """Data-driven tests for Mapper, using corpora of (input, expected) cases."""

    def setUp(self):
        self.mapper = Mapper()
        self.utils = Utils()
        utils_for_tests = UtilsForTests()
        self.test_map_scale_to_white_keys = utils_for_tests.loadTestCorpus('test_corpus/test_to_white_keys_corpus')
        self.test_get_map = utils_for_tests.loadTestCorpus('test_corpus/test_get_map_corpus')

    def test_mapScaleToWhiteKeys(self):
        # Each case is (scale, expected_mapping).
        for case in self.test_map_scale_to_white_keys:
            mapped_scale = self.mapper.mapScaleToWhiteKeys(case[0])
            self.assertDictEqual(mapped_scale, case[1])

    def test_getMap(self):
        # Each case is (note, scale, expected_map).
        for case in self.test_get_map:
            # Renamed from 'map' to avoid shadowing the builtin.
            mapping = self.mapper.getMap(case[0], case[1])
            self.assertDictEqual(mapping, case[2])

    @unittest.skip("Preformance test")
    def test_TimeitGetMap(self):
        setup = "from utils import Utils; from mapper import Mapper; mapper = Mapper(); utils = Utils();"
        code_to_test = """
for scale in utils.getAvailableScales():
    for note in utils.getNotes():
        mapper.getMap(note, scale);
"""
        result_first = timeit.repeat(code_to_test, setup=setup, repeat=100, number=100)
        # BUG FIX: the original used reduce(), which is not a builtin on
        # Python 3 and was never imported; sum()/len() is equivalent.
        result_avg = sum(result_first) / len(result_first)
        print("Result avg: " + str(result_avg))
| doino-gretchenliev/Mid-Magic | tests/test_mapper.py | Python | apache-2.0 | 1,653 |
from django.conf import settings as djangosettings
from django.core.urlresolvers import reverse
from django.test import TestCase
import keyedcache
import livesettings
from livesettings import *
import logging
log = logging.getLogger('test');
class ConfigurationFunctionTest(TestCase):
    """Tests for registering configuration values with config_register()."""

    def testSetSingleConfigItem(self):
        value = IntegerValue(BASE_GROUP, 'SingleItem')
        config_register(value)
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue(config_exists(BASE_GROUP, 'SingleItem'))

    def testSetTwoConfigItems(self):
        s = [IntegerValue(BASE_GROUP, 'testTwoA'), StringValue(BASE_GROUP, 'testTwoB')]
        config_register_list(*s)
        self.assertTrue(config_exists(BASE_GROUP, 'testTwoA'))
        self.assertTrue(config_exists(BASE_GROUP, 'testTwoB'))

    def testSetGroup(self):
        # A value registered in a named group must not leak into BASE_GROUP.
        g1 = ConfigurationGroup('test1', 'test1')
        value = IntegerValue(g1, 'SingleGroupedItem')
        config_register(value)
        self.assertFalse(config_exists(BASE_GROUP, 'SingleGroupedItem'))
        self.assertTrue(config_exists(g1, 'SingleGroupedItem'))
class ConfigurationTestSettings(TestCase):
    """Tests for reading, defaulting and updating individual settings."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        g = ConfigurationGroup('test2', 'test2')
        self.g = g
        config_register(StringValue(g, 's1'))
        config_register(IntegerValue(g, 's2', default=10))
        config_register(IntegerValue(g, 's3', default=10))

    def testSetSetting(self):
        c = config_get('test2', 's1')
        c.update('test')
        self.assertEqual(c.value, 'test')
        self.assertEqual(c.setting.value, 'test')

    def testSettingDefault(self):
        c = config_get('test2', 's2')
        self.assertEqual(c.value, 10)

    def testSetAndReset(self):
        """Test setting one value and then updating"""
        c = config_get('test2', 's1')
        c.update('test1')
        self.assertEqual(c.value, 'test1')
        # should be true, since it is an update
        # (assertTrue replaces the deprecated assert_ alias)
        self.assertTrue(c.update('test2'))
        self.assertEqual(c.value, 'test2')

    def testTwice(self):
        """Config items should respond False to duplicate requests to update."""
        c = config_get('test2', 's1')
        c.update('test1')
        self.assertFalse(c.update('test1'))

    def testDeletesDefault(self):
        c = config_get('test2', 's3')
        # false because it isn't saving a default value
        self.assertFalse(c.update(10))
        self.assertTrue(c.update(20))
        self.assertEqual(c.value, 20)
        try:
            # Accessing .setting must not raise now (unused binding removed).
            c.setting
        except SettingNotSet:
            self.fail("Should have a setting now")
        # now delete and go back to no setting by setting the default
        self.assertTrue(c.update(10))
        self.assertEqual(c.value, 10)
        try:
            c.setting
            self.fail('Should throw SettingNotSet')
        except SettingNotSet:
            pass
class ConfigTestDotAccess(TestCase):
    """Tests attribute-style access via ConfigurationSettings()."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        g = ConfigurationGroup('test3', 'test3')
        self.g = g
        config_register(BooleanValue(g, 's1', default=True))
        c2 = config_register(IntegerValue(g, 's2', default=10))
        c2.update(100)

    def testDotAccess(self):
        self.assertTrue(ConfigurationSettings().test3.s1.value)
        self.assertEqual(ConfigurationSettings().test3.s2.value, 100)

    def testSettingProperty(self):
        c = config_get('test3', 's2')
        s = c.setting
        # BUG FIX: the original called self.assert_(s.value, 100), which only
        # checked truthiness — 100 was silently treated as the msg argument.
        self.assertEqual(s.value, 100)

    def testDictValues(self):
        d = self.g.dict_values()
        self.assertEqual(d, {'s1': True, 's2': 100})
class ConfigTestModuleValue(TestCase):
    """Tests for ModuleValue, which stores an importable module."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        g = ConfigurationGroup('modules', 'module test')
        self.g = g
        self.c = config_register(ModuleValue(g, 'test'))

    def testModule(self):
        c = config_get('modules', 'test')
        c.update('django')
        # The stored value is the imported module itself.
        # (assertTrue replaces the deprecated assert_ alias)
        self.assertTrue(hasattr(self.c.value, 'get_version'))
class ConfigTestSortOrder(TestCase):
    """Groups must iterate in their declared ordering, not registration order."""

    def setUp(self):
        # Start from a clean cache so earlier tests cannot interfere.
        keyedcache.cache_delete()
        g1 = ConfigurationGroup('group1', 'Group 1', ordering=-1001)
        g2 = ConfigurationGroup('group2', 'Group 2', ordering=-1002)
        g3 = ConfigurationGroup('group3', 'Group 3', ordering=-1003)
        self.g1 = g1
        self.g2 = g2
        self.g3 = g3
        # Register c1..c3 in each group, keeping references as attributes
        # (self.g1c1 ... self.g3c3) exactly like the original fixture.
        for group_attr, group in (('g1', g1), ('g2', g2), ('g3', g3)):
            for key in ('c1', 'c2', 'c3'):
                setattr(self, group_attr + key,
                        config_register(IntegerValue(group, key)))

    def testGroupOrdering(self):
        mgr = ConfigurationSettings()
        # More negative ordering sorts earlier: g3 (-1003) first, g1 last.
        self.assertEqual(mgr[2].key, self.g1.key)
        self.assertEqual(mgr[1].key, self.g2.key)
        self.assertEqual(mgr[0].key, self.g3.key)
class TestMultipleValues(TestCase):
    """Tests for MultipleStringValue choice handling."""

    def setUp(self):
        # Start from a clean cache so earlier tests cannot interfere.
        keyedcache.cache_delete()
        group = ConfigurationGroup('m1', 'Multiple Group 1', ordering=1000)
        self.g1 = group
        self.g1c1 = config_register(
            MultipleStringValue(group, 'c1',
                                choices=((1, 'one'), (2, 'two'), (3, 'three'))))

    def testSave(self):
        cfg = config_get('m1', 'c1')
        cfg.update([1, 2])
        self.assertEqual(cfg.value, [1, 2])

    def testAddChoice(self):
        config_add_choice('m1', 'c1', (4, 'four'))
        cfg = config_get('m1', 'c1')
        self.assertEqual(cfg.choices,
                         ((1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')))

    def testChoiceValues(self):
        self.g1c1.update([1, 2])
        self.assertEqual(self.g1c1.value, [1, 2])
        self.assertEqual(self.g1c1.choice_values, [(1, 'one'), (2, 'two')])
        self.assertEqual(config_choice_values('m1', 'c1'),
                         [(1, 'one'), (2, 'two')])
class TestMultipleValuesWithDefault(TestCase):
    """A MultipleStringValue must honour its default list until updated."""

    def setUp(self):
        # Start from a clean cache so earlier tests cannot interfere.
        keyedcache.cache_delete()
        group = ConfigurationGroup('mv2', 'Multiple Group 2', ordering=1000)
        self.g1 = group
        self.g1c1 = config_register(
            MultipleStringValue(
                group,
                'c1',
                choices=((1, 'one'), (2, 'two'), (3, 'three')),
                default=[1, 2]))

    def testDefault(self):
        cfg = config_get('mv2', 'c1')
        self.assertEqual(cfg.value, [1, 2])
        cfg.update([1, 2, 3])
        self.assertEqual(cfg.value, [1, 2, 3])
class ConfigTestChoices(TestCase):
    """Choices may be attached before the value itself is registered."""

    def testAddPreregisteredChoice(self):
        """Test that we can register choices before the config is actually set up."""
        for pair in (('a', 'Item A'), ('b', 'Item B'), ('c', 'Item C')):
            config_add_choice('ctg1', 'c1', pair)
        group = ConfigurationGroup('ctg1', 'Choice 1', ordering=1000)
        config_register(StringValue(group, 'c1'))
        cfg = config_get('ctg1', 'c1')
        self.assertEqual(cfg.choices,
                         [('a', 'Item A'), ('b', 'Item B'), ('c', 'Item C')])
class ConfigTestRequires(TestCase):
    """Values gated on a BooleanValue appear only while that value is true."""

    def setUp(self):
        # Start from a clean cache so earlier tests cannot interfere.
        keyedcache.cache_delete()
        group = ConfigurationGroup('req1', 'Requirements 1', ordering=1000)
        self.g1 = group
        bool1 = config_register(BooleanValue(group, 'bool1', default=False, ordering=1))
        bool2 = config_register(BooleanValue(group, 'bool2', ordering=2))
        self.g1c1 = config_register(IntegerValue(group, 'c1', requires=bool1, ordering=3))
        self.g1c2 = config_register(IntegerValue(group, 'c2', requires=bool2, ordering=4))
        self.g1c3 = config_register(IntegerValue(group, 'c3', ordering=5))
        bool2.update(True)

    def testSimpleRequires(self):
        self.assertTrue(config_value('req1', 'bool2'))
        # c1 is hidden while its gate (bool1) is still false.
        self.assertEqual([cfg.key for cfg in self.g1],
                         ['bool1', 'bool2', 'c2', 'c3'])
        gate = config_get('req1', 'bool1')
        gate.update(True)
        # Once bool1 is true, c1 becomes visible as well.
        self.assertEqual([cfg.key for cfg in self.g1],
                         ['bool1', 'bool2', 'c1', 'c2', 'c3'])
class ConfigTestRequiresChoices(TestCase):
    """Tests for values whose visibility depends on another setting's value."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        g1 = ConfigurationGroup('req2', 'Requirements 2', ordering=1000)
        self.g1 = g1
        choices1 = config_register(MultipleStringValue(BASE_GROUP, 'rc1', ordering=1))
        self.g1c1 = config_register(IntegerValue(g1, 'c1', requires=choices1, ordering=3))
        self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices1, ordering=4))
        self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
        choices1.update('c1')
        g2 = ConfigurationGroup('req3', 'Requirements 3', ordering=1000)
        self.g2 = g2
        choices2 = config_register(StringValue(BASE_GROUP, 'choices2', ordering=1))
        self.g2c1 = config_register(IntegerValue(g2, 'c1', requires=choices2, ordering=3))
        self.g2c2 = config_register(IntegerValue(g2, 'c2', requires=choices2, ordering=4))
        self.g2c3 = config_register(IntegerValue(g2, 'c3', requires=choices2, ordering=5))
        choices2.update('c1')

    def testSimpleRequiresChoices(self):
        v = config_value('BASE', 'rc1')
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(v, ['c1'])
        g = config_get_group('req2')
        keys = [cfg.key for cfg in g]
        self.assertEqual(keys, ['c1', 'c3'])
        c = config_get('BASE', 'rc1')
        c.update(['c1', 'c2'])
        g = config_get_group('req2')
        keys = [cfg.key for cfg in g]
        self.assertEqual(keys, ['c1', 'c2', 'c3'])

    def testRequiresSingleValue(self):
        v = config_value('BASE', 'choices2')
        self.assertEqual(v, 'c1')
        keys = [cfg.key for cfg in self.g2]
        self.assertEqual(keys, ['c1'])
        c = config_get('BASE', 'choices2')
        c.update('c2')
        keys = [cfg.key for cfg in self.g2]
        self.assertEqual(keys, ['c2'])
class ConfigTestRequiresValue(TestCase):
    """Tests for values gated on a *specific* value of another setting."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        g1 = ConfigurationGroup('reqval', 'Requirements 3', ordering=1000)
        self.g1 = g1
        choices1 = config_register(MultipleStringValue(BASE_GROUP, 'valchoices', ordering=1))
        self.g1c1 = config_register(IntegerValue(g1, 'c1', requires=choices1, requiresvalue='foo', ordering=3))
        self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices1, requiresvalue='bar', ordering=4))
        self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))
        choices1.update('foo')
        g2 = ConfigurationGroup('reqval2', 'Requirements 4', ordering=1000)
        self.g2 = g2
        choices2 = config_register(StringValue(BASE_GROUP, 'valchoices2', ordering=1,
                                               choices=(('a', 'test a'), ('b', 'test b'), ('c', 'test c'))))
        self.g2c1 = config_register(IntegerValue(g2, 'c1', requires=choices2, requiresvalue='a', ordering=3))
        self.g2c2 = config_register(IntegerValue(g2, 'c2', requires=choices2, requiresvalue='b', ordering=4))
        self.g2c3 = config_register(IntegerValue(g2, 'c3', requires=choices2, requiresvalue='c', ordering=5))
        choices2.update('a')

    def testRequiresValue(self):
        v = config_value('BASE', 'valchoices')
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(v, ['foo'])
        g = config_get_group('reqval')
        keys = [cfg.key for cfg in g]
        self.assertEqual(keys, ['c1', 'c3'])
        c = config_get('BASE', 'valchoices')
        c.update(['foo', 'bar'])
        g = config_get_group('reqval')
        keys = [cfg.key for cfg in g]
        self.assertEqual(keys, ['c1', 'c2', 'c3'])

    def testRequiresSingleValue(self):
        v = config_value('BASE', 'valchoices2')
        self.assertEqual(v, 'a')
        keys = [cfg.key for cfg in self.g2]
        self.assertEqual(keys, ['c1'])
        c = config_get('BASE', 'valchoices2')
        c.update('b')
        keys = [cfg.key for cfg in self.g2]
        self.assertEqual(keys, ['c2'])
class ConfigTestGroupRequires(TestCase):
    """A whole group can be gated on a MultipleStringValue selection."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        choices1 = config_register(MultipleStringValue(BASE_GROUP, 'groupchoice', ordering=1))
        choices2 = config_register(MultipleStringValue(BASE_GROUP, 'groupchoice2', ordering=1))
        g1 = ConfigurationGroup('groupreq', 'Requirements 4', ordering=1000, requires=choices1)
        self.g1 = g1
        self.g1c1 = config_register(IntegerValue(g1, 'c1', ordering=3))
        self.g1c2 = config_register(IntegerValue(g1, 'c2', requires=choices2, requiresvalue='bar', ordering=4))
        self.g1c3 = config_register(IntegerValue(g1, 'c3', ordering=5))

    def testRequiresValue(self):
        c = config_get('BASE', 'groupchoice')
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(c.value, [])
        # While the group is not selected, no keys are visible at all.
        keys = [cfg.key for cfg in self.g1]
        self.assertEqual(keys, [])
        c2 = config_get('BASE', 'groupchoice2')
        c2.update('bar')
        keys = [cfg.key for cfg in self.g1]
        self.assertEqual(keys, ['c2'])
        c.update(['groupreq'])
        keys = [cfg.key for cfg in self.g1]
        self.assertEqual(keys, ['c1', 'c2', 'c3'])
class ConfigCollectGroup(TestCase):
    """Tests for config_collect_values() across several groups."""

    def setUp(self):
        keyedcache.cache_delete()
        collector = config_register(MultipleStringValue(BASE_GROUP, 'collect', ordering=1))
        self.choices = collector
        g1 = ConfigurationGroup('coll1', 'Collection 1')
        g2 = ConfigurationGroup('coll2', 'Collection 2')
        g3 = ConfigurationGroup('coll3', 'Collection 3')
        first = config_register(StringValue(g1, 'test'))
        second = config_register(StringValue(g1, 'test1'))
        third = config_register(StringValue(g2, 'test'))
        fourth = config_register(StringValue(g3, 'test'))
        first.update('set a')
        second.update('set b')
        third.update('set a')
        fourth.update('set d')
        collector.update(['coll1', 'coll3'])

    def testCollectSimple(self):
        collected = config_collect_values('BASE', 'collect', 'test')
        self.assertEqual(collected, ['set a', 'set d'])

    def testCollectUnique(self):
        # With all three groups selected, 'set a' appears twice unless
        # unique=True collapses the duplicates.
        self.choices.update(['coll1', 'coll2', 'coll3'])
        collected = config_collect_values('BASE', 'collect', 'test', unique=False)
        self.assertEqual(collected, ['set a', 'set a', 'set d'])
        collected = config_collect_values('BASE', 'collect', 'test', unique=True)
        self.assertEqual(collected, ['set a', 'set d'])
class LongSettingTest(TestCase):
    """A LongStringValue must store values longer than a normal setting."""

    def setUp(self):
        keyedcache.cache_delete()
        self.wide = config_register(
            LongStringValue(BASE_GROUP, 'LONG', ordering=1, default="woot"))
        self.wide.update('*' * 1000)

    def testLongStorage(self):
        stored = config_value('BASE', 'LONG')
        self.assertEqual(len(stored), 1000)
        self.assertEqual(stored, '*' * 1000)

    def testShortInLong(self):
        # A short string must round-trip through the long storage, too.
        self.wide.update("test")
        stored = config_value('BASE', 'LONG')
        self.assertEqual(len(stored), 4)
        self.assertEqual(stored, 'test')

    def testDelete(self):
        # Updating back to the default must delete the stored row.
        old_pk = self.wide.setting.id
        self.wide.update('woot')
        try:
            LongSetting.objects.get(pk=old_pk)
        except LongSetting.DoesNotExist:
            pass
        else:
            self.fail("Should be deleted")
class OverrideTest(TestCase):
    """Test settings overrides via djangosettings.LIVESETTINGS_OPTIONS."""

    def setUp(self):
        # clear out cache from previous runs
        keyedcache.cache_delete()
        djangosettings.LIVESETTINGS_OPTIONS = {
            1: {
                'DB': False,
                'SETTINGS': {
                    'overgroup': {
                        's2': '100',
                        'choices': '["one","two","three"]'
                    }
                }
            }
        }
        g = ConfigurationGroup('overgroup', 'Override Group')
        self.g = g
        config_register(StringValue(g, 's1'))
        config_register(IntegerValue(g, 's2', default=10))
        config_register(IntegerValue(g, 's3', default=10))
        config_register(MultipleStringValue(g, 'choices'))

    def tearDown(self):
        # Remove the override so other test cases see normal behavior.
        djangosettings.LIVESETTINGS_OPTIONS = {}

    def testOverriddenSetting(self):
        """Accessing an overridden setting should give the override value."""
        c = config_get('overgroup', 's2')
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(c.value, 100)

    def testCantChangeSetting(self):
        """When overridden, setting a value should not work, should get the overridden value"""
        c = config_get('overgroup', 's2')
        c.update(1)
        c = config_get('overgroup', 's2')
        self.assertEqual(c.value, 100)

    def testNotOverriddenSetting(self):
        """Settings which are not overridden should return their defaults"""
        c = config_get('overgroup', 's3')
        self.assertEqual(c.value, 10)

    def testOverriddenListSetting(self):
        """Make sure lists work when overridden"""
        c = config_get('overgroup', 'choices')
        v = c.value
        self.assertEqual(len(v), 3)
        self.assertEqual(v[0], "one")
        self.assertEqual(v[1], "two")
        self.assertEqual(v[2], "three")
class PermissionTest(TestCase):
    """Test access permissions"""
    # URLconf used by the Django test client for this TestCase.
    urls = 'livesettings.test_urls'

    def setUp(self):
        from django.contrib.auth.models import Permission, User
        from django.contrib.contenttypes.models import ContentType
        # Users with different permissions
        # staff member
        user1 = User.objects.create_user('warehouseman', 'john@example.com', 'secret')
        user1.is_staff = True
        user1.save()
        # developper with limited permissions
        user2 = User.objects.create_user('cautious_developer', 'fred@example.com', 'secret')
        user2.is_staff = True
        user2.user_permissions.add(Permission.objects.get(codename='change_setting',
                content_type=ContentType.objects.get(app_label='livesettings', model='setting')))
        user2.save()
        # superuser
        user3 = User.objects.create_user('superuser', 'paul@example.com', 'secret')
        user3.is_superuser = True
        user3.save()
        keyedcache.cache_delete()
        # Example config
        config_register(IntegerValue(BASE_GROUP, 'SingleItem', default=0))

    def test_unauthorized_form(self):
        "Testing users without enought additional permission"
        # usually login_url_mask % nexturl is '/accounts/login/?next=/settings/'
        login_url_mask = '%s?next=%%s' % reverse('django.contrib.auth.views.login')
        # unauthorized
        response = self.client.get(reverse('satchmo_site_settings'))  # usually '/settings/'
        self.assertRedirects(response, login_url_mask % '/settings/', msg_prefix='unathorized user should first login')
        # few authorized
        self.client.login(username='warehouseman', password='secret')
        response = self.client.get(reverse('satchmo_site_settings'))
        self.assertRedirects(response, login_url_mask % '/settings/', msg_prefix='user with small permission should not read normal settings')
        # authorized enough but not for secret values
        self.client.login(username='cautious_developer', password='secret')
        response = self.client.get(reverse('settings_export'))  # usually '/settings/export/'
        self.assertRedirects(response, login_url_mask % '/settings/export/', msg_prefix='user without superuser permission should not export sensitive settings')

    def test_authorized_enough(self):
        "Testing a sufficiently authorized user"
        self.client.login(username='cautious_developer', password='secret')
        response = self.client.get(reverse('satchmo_site_settings'))
        self.assertContains(response, 'SingleItem')
        self.client.login(username='superuser', password='secret')
        response = self.client.get(reverse('settings_export'))
        self.assertContains(response, 'LIVESETTINGS_OPTIONS = ')

    def test_export(self):
        "Details of exported settings"
        self.client.login(username='superuser', password='secret')
        val2 = IntegerValue(BASE_GROUP, 'ModifiedItem', default=0)
        config_register(val2)
        val2.update(6789)
        response = self.client.get('/settings/export/')
        # The export page renders a LIVESETTINGS_OPTIONS dict with the DB
        # disabled and the modified value serialized as a string.
        self.assertContains(response, "LIVESETTINGS_OPTIONS =", 1)
        self.assertContains(response, "'DB': False", 1)
        self.assertContains(response, "u'BASE':", 1)
        self.assertContains(response, "u'ModifiedItem': u'6789'", 1)

    def test_secret_password(self):
        "Verify that password is saved but not re-echoed if render_value=False"
        # example of value, where reading is more sensitive than writing
        val1 = PasswordValue(BASE_GROUP, 'password_to_reading_external_payment_gateway', render_value=False)
        config_register(val1)
        val1.update('secret')
        val2 = PasswordValue(BASE_GROUP, 'unsecure_password')
        config_register(val2)
        val2.update('unsecure_pwd')
        self.client.login(username='superuser', password='secret')
        response = self.client.get('/settings/')
        # The sensitive value's name appears but its content must not.
        self.assertContains(response, 'password_to_reading_external_payment_gateway')
        self.assertNotContains(response, 'secret')
        self.assertContains(response, 'unsecure_password')
        self.assertContains(response, 'unsecure_pwd')
class WebClientPostTest(TestCase):
    """
    Tests of the web interface with POST.
    These tests require temporary removing all earlier defined values.
    Then are all values restored because it can be important for testing an application which uses livesettings.
    """
    # URLconf used by the Django test client for this TestCase.
    urls = 'livesettings.test_urls'

    def setUp(self):
        from django.contrib.auth.models import User
        from django.utils.datastructures import SortedDict
        # The following hack works like completely replaced ConfigurationSettings internal state only, if
        # no the same group name is used inside and outside the test.
        self.saved_conf_inst = ConfigurationSettings._ConfigurationSettings__instance.settings
        ConfigurationSettings.__dict__['_ConfigurationSettings__instance'].settings = SortedDict()
        keyedcache.cache_delete()
        # set new users and values
        user = User.objects.create_user('admin', 'admin@example.com', 'secret')
        user.is_superuser = True
        user.save()
        self.client.login(username='admin', password='secret')
        GROUP2 = ConfigurationGroup('Group2', 'g')
        value = IntegerValue(GROUP2, 'SingleItem')
        config_register(value)

    def tearDown(self):
        # restore the original configuration
        ConfigurationSettings.__dict__['_ConfigurationSettings__instance'].settings = self.saved_conf_inst

    def test_post(self):
        "Tests of POST, verify is saved"
        response = self.client.post('/settings/', {'Group2__SingleItem': '7890'})
        # test can not use assertRedirects because it would consume the next get
        self.assertEqual((response.status_code, response.get('Location', '')), (302, 'http://testserver/settings/'))
        response = self.client.get('/settings/')
        self.assertContains(response, 'Updated')
        self.assertContains(response, '7890')

    def test_empty_fields(self):
        "test an empty value in the form should not raise an exception"
        # Some test features had been temporary commented out before some ..Values classes are fixed
        # because I do not want to display many old inconsistencies now. (hynekcer)
        def extract_val(content):
            # Pull the rendered value="" attribute of the SingleItem input.
            regr = re.search(r'SingleItem.*value="([^"]*)"', content, flags=re.MULTILINE)
            return regr and regr.group(1) or ''  # html value
        def get_setting_like_in_db(x):
            # 'Error' stands in for a missing database row.
            try:
                return x.setting.value
            except SettingNotSet:
                return 'Error'
        def test_empty_value_type(value_type, protocol, reject_empty=False):
            "empty value can be accepted or rejected by validation rules"
            value = value_type(GROUP2, 'SingleItem')  # first it does it to easy get the class name
            type_name = value.__class__.__name__
            value = value_type(GROUP2, 'SingleItem', description = 'type %s' % type_name)
            config_register(value)
            response = self.client.get('/settings/')
            html_value = extract_val(response.content)
            #print '%s "%s"' % (type_name, html_value)
            response = self.client.post('/settings/', {'Group2__SingleItem': ''})  # See in the traceback a line one level Up
            if reject_empty:
                # option reject_empty had been tested before all Value types were fixed to be similar accepting empty value
                # this is a typical text from validation warning
                self.assertContains(response, 'Please correct the error below.')
            else:
                self.assertRedirects(response, '/settings/')
            response = self.client.get('/settings/')
            html_value = extract_val(response.content)
            #print '%s "%s" "%s" "%s"' % (type_name, html_value, value.value, get_setting_like_in_db(value))
            #self.assertNotContains(response, '<object object at 0x[0-9a-f]+>') # rendered NOTSET = object()
            #if re.search('SingleItem.*value="', response.content):
            #    self.assertTrue(re.search('SingleItem.*value="([0.]*|\[\])"', response.content))
            protocol.add(value_type)
        #
        import re
        GROUP2 = ConfigurationGroup('Group2', 'g')
        protocol = set()
        # tested values
        test_empty_value_type(BooleanValue, protocol)
        test_empty_value_type(DecimalValue, protocol)
        test_empty_value_type(DurationValue, protocol)
        test_empty_value_type(FloatValue, protocol)
        test_empty_value_type(IntegerValue, protocol)
        test_empty_value_type(PositiveIntegerValue, protocol)
        test_empty_value_type(StringValue, protocol)
        test_empty_value_type(LongStringValue, protocol)
        test_empty_value_type(MultipleStringValue, protocol)
        test_empty_value_type(LongMultipleStringValue, protocol)
        test_empty_value_type(ModuleValue, protocol)
        test_empty_value_type(PasswordValue, protocol)
        # verify completness of the test
        classes_to_test = set(getattr(livesettings.values, k) for k in livesettings.values.__all__ if
                not k in ('BASE_GROUP', 'ConfigurationGroup', 'Value', 'SortedDotDict', 'PercentValue'))
        self.assertEqual(protocol, classes_to_test, msg='The tested classes have been not all exactly the same as expected')

    def test_csrf(self):
        "test CSRF"
        from django.test import Client
        csrf_client = Client(enforce_csrf_checks=True)
        csrf_client.login(username='admin', password='secret')
        # get CSFR token
        response = csrf_client.get('/settings/')
        csrfmiddlewaretoken = str(response.context['csrf_token'])
        self.assertContains(response, csrfmiddlewaretoken, msg_prefix='has not csrf')
        # expect OK
        response = csrf_client.post('/settings/', {'Group2__SingleItem': '1234', 'csrfmiddlewaretoken': csrfmiddlewaretoken})
        self.assertRedirects(response, expected_url='/settings/')
        # expect 403
        response = csrf_client.post('/settings/', {'Group2__SingleItem': '1234'})
        self.assertContains(response, 'CSRF', status_code=403, msg_prefix='should require csrf')
| pombredanne/django-livesettings | livesettings/tests.py | Python | bsd-3-clause | 28,083 |
# jhbuild - a tool to ease building collections of source packages
# Copyright (C) 2007 Mariano Suarez-Alvarez
#
# notify.py: using libnotify
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
try:
import dbus
except ImportError:
dbus = None
class Notify:
    """Desktop notifications through the org.freedesktop.Notifications D-Bus
    service. Degrades to a silent no-op when D-Bus is unavailable or when
    notifications are disabled in the configuration."""

    def __init__(self, config=None):
        self.disabled = False
        self.notif_id = 0
        self.iface = self.get_iface()
        if self.iface is None or (config and config.nonotify):
            self.disabled = True

    def get_iface(self):
        """Return the Notifications D-Bus interface, or None if unavailable."""
        if dbus is None:
            return None
        try:
            session = dbus.SessionBus()
            proxy = session.get_object('org.freedesktop.Notifications',
                                       '/org/freedesktop/Notifications')
            return dbus.Interface(proxy,
                                  dbus_interface='org.freedesktop.Notifications')
        except dbus.exceptions.DBusException:
            return None

    def reset(self):
        """Forget the current notification and re-acquire the interface."""
        self.notif_id = 0
        self.iface = self.get_iface()

    def notify(self, summary, body, icon="", expire=0):
        """Emit a notification; replaces the previous one if still visible."""
        if self.disabled:
            return
        try:
            self.notif_id = self.iface.Notify(
                "jhbuild", self.notif_id, icon, summary, body,
                [], {}, 1000 * expire)
        except dbus.exceptions.DBusException:
            self.reset()

    def clear(self):
        """Close the last notification, if one is still being shown."""
        if self.notif_id == 0:
            return
        try:
            self.iface.CloseNotification(self.notif_id)
            self.notif_id = 0
        except dbus.exceptions.DBusException:
            self.reset()
if __name__ == "__main__":
    # Manual smoke test: show a single notification.
    n = Notify()
    n.notify("A summary", "A body text")
| silvermagic/jhbuild | jhbuild/utils/notify.py | Python | gpl-2.0 | 2,371 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Collectd plugin for collecting docker container stats
#
# Copyright © 2015 eNovance
#
# Authors:
# Sylvain Baubeau <sylvain.baubeau@enovance.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Requirements: docker-py
import dateutil.parser
from distutils.version import StrictVersion
import docker
import os
import threading
import time
import sys
import re
def _c(c):
"""A helper method for representing a container in messages. If the given
argument is a string, it is assumed to be the container's ID and only the
first 7 digits will be returned. If it's a dictionary, the string returned
is <7-digit ID>/<name>."""
if type(c) == str or type(c) == unicode:
return c[:7]
return '{id}/{name}'.format(id=c['Id'][:7], name=c['Name'])
class Stats:
    """Base class for stats readers: each subclass turns one section of the
    Docker stats payload into CollectD values via emit()."""

    @classmethod
    def emit(cls, container, type, value, t=None, type_instance=None):
        # Dispatch a list of values to collectd, tagged with the container
        # name as the plugin instance.
        val = collectd.Values()
        val.plugin = 'docker'
        val.plugin_instance = container['Name']
        if type:
            val.type = type
        if type_instance:
            val.type_instance = type_instance
        if t:
            # 't' is a timestamp string parsed by dateutil (presumably the
            # 'read' field of the stats payload — confirm against callers).
            val.time = time.mktime(dateutil.parser.parse(t).timetuple())
        else:
            val.time = time.time()
        # With some versions of CollectD, a dummy metadata map must be added
        # to each value for it to be correctly serialized to JSON by the
        # write_http plugin. See
        # https://github.com/collectd/collectd/issues/716
        val.meta = {'true': 'true'}
        val.values = value
        val.dispatch()

    @classmethod
    def read(cls, container, stats, t):
        # Subclasses parse their section of the stats payload and emit().
        raise NotImplementedError
class BlkioStats(Stats):
    """Reader for the 'blkio_stats' section of the Docker stats payload."""

    @classmethod
    def read(cls, container, stats, t):
        blkio_stats = stats['blkio_stats']
        for key, values in blkio_stats.items():
            # Block IO stats are reported by block device (with major/minor
            # numbers). We need to group and report the stats of each block
            # device independently.
            device_stats = {}
            for value in values:
                k = '{key}-{major}-{minor}'.format(key=key,
                                                   major=value['major'],
                                                   minor=value['minor'])
                device_stats.setdefault(k, []).append(value['value'])
            for type_instance, values in device_stats.items():
                if len(values) == 5:
                    cls.emit(container, 'blkio', values,
                             type_instance=type_instance, t=t)
                elif len(values) == 1:
                    # For some reason, some fields contains only one value and
                    # the 'op' field is empty. Need to investigate this
                    cls.emit(container, 'blkio.single', values,
                             type_instance=key, t=t)
                else:
                    # BUG FIX: the collectd Python API exposes 'warning', not
                    # 'warn'; collectd.warn raised AttributeError whenever
                    # this branch was reached.
                    collectd.warning(('Unexpected number of blkio stats for '
                                      'container {container}!')
                                     .format(container=_c(container)))
class CpuStats(Stats):
    """Reader for the 'cpu_stats' (and 'precpu_stats') sections."""

    @classmethod
    def read(cls, container, stats, t):
        cpu_stats = stats['cpu_stats']
        cpu_usage = cpu_stats['cpu_usage']
        # One metric per core from the per-CPU counters.
        percpu = cpu_usage['percpu_usage']
        for cpu, value in enumerate(percpu):
            cls.emit(container, 'cpu.percpu.usage', [value],
                     type_instance='cpu%d' % (cpu,), t=t)
        # Sorted by key so the emitted value order is deterministic.
        items = sorted(cpu_stats['throttling_data'].items())
        cls.emit(container, 'cpu.throttling_data', [x[1] for x in items], t=t)
        system_cpu_usage = cpu_stats['system_cpu_usage']
        values = [cpu_usage['total_usage'], cpu_usage['usage_in_kernelmode'],
                  cpu_usage['usage_in_usermode'], system_cpu_usage]
        cls.emit(container, 'cpu.usage', values, t=t)
        # CPU Percentage based on calculateCPUPercent Docker method
        # https://github.com/docker/docker/blob/master/api/client/stats.go
        cpu_percent = 0.0
        if 'precpu_stats' in stats:
            # 'precpu_stats' holds the counters from the previous sample; the
            # percentage is derived from the delta between the two samples.
            precpu_stats = stats['precpu_stats']
            precpu_usage = precpu_stats['cpu_usage']
            cpu_delta = cpu_usage['total_usage'] - precpu_usage['total_usage']
            system_delta = system_cpu_usage - precpu_stats['system_cpu_usage']
            if system_delta > 0 and cpu_delta > 0:
                cpu_percent = 100.0 * cpu_delta / system_delta * len(percpu)
        cls.emit(container, "cpu.percent", ["%.2f" % (cpu_percent)], t=t)
class NetworkStats(Stats):
    """Collects and emits the network statistics of a container."""

    @classmethod
    def read(cls, container, stats, t):
        """Emit the container's network counters, sorted by field name."""
        pairs = sorted(stats['network'].items())
        cls.emit(container, 'network.usage', [pair[1] for pair in pairs], t=t)
class MemoryStats(Stats):
    """Collects and emits the memory statistics of a container."""

    @classmethod
    def read(cls, container, stats, t):
        """Emit memory limit/usage values, detailed stats and a percentage."""
        mem_stats = stats['memory_stats']
        limit = mem_stats['limit']
        usage = mem_stats['usage']
        cls.emit(container, 'memory.usage',
                 [limit, mem_stats['max_usage'], usage], t=t)

        # Each entry of the 'stats' sub-document becomes its own metric.
        for name, value in mem_stats['stats'].items():
            cls.emit(container, 'memory.stats', [value],
                     type_instance=name, t=t)

        mem_percent = 100.0 * usage / limit
        cls.emit(container, 'memory.percent', ["%.2f" % mem_percent], t=t)
class ContainerStats(threading.Thread):
"""
A thread that continuously consumes the stats stream from a container,
keeping the most recently read stats available for processing by CollectD.
Such a mechanism is required because the first read from Docker's stats API
endpoint can take up to one second. Hitting this endpoint for every
container running on the system would only be feasible if the number of
running containers was less than the polling interval of CollectD. Above
that, and the whole thing breaks down. It is thus required to maintain open
the stats stream and read from it, but because it is a continuous stream we
need to be continuously consuming from it to make sure that when CollectD
requests a plugin read, it gets the latest stats data from each container.
The role of this thread is to keep consuming from the stats endpoint (it's
a blocking stream read, getting stats data from the Docker daemon every
second), and make the most recently read data available in a variable.
"""
def __init__(self, container, client):
threading.Thread.__init__(self)
self.daemon = True
self.stop = False
self._container = container
self._client = client
self._feed = None
self._stats = None
# Automatically start stats reading thread
self.start()
def run(self):
collectd.info('Starting stats gathering for {container}.'
.format(container=_c(self._container)))
failures = 0
while not self.stop:
try:
if not self._feed:
self._feed = self._client.stats(self._container,
decode=True)
self._stats = self._feed.next()
# Reset failure count on successfull read from the stats API.
failures = 0
except Exception, e:
collectd.warning('Error reading stats from {container}: {msg}'
.format(container=_c(self._container), msg=e))
# If we encounter a failure, wait a second before retrying and
# mark the failures. After three consecutive failures, we'll
# stop the thread. If the container is still there, we'll spin
# up a new stats gathering thread the next time read_callback()
# gets called by CollectD.
time.sleep(1)
failures += 1
if failures > 3:
self.stop = True
# Marking the feed as dead so we'll attempt to recreate it and
# survive transient Docker daemon errors/unavailabilities.
self._feed = None
collectd.info('Stopped stats gathering for {container}.'
.format(container=_c(self._container)))
@property
def stats(self):
"""Wait, if needed, for stats to be available and return the most
recently read stats data, parsed as JSON, for the container."""
while not self._stats:
pass
return self._stats
class DockerPlugin:
"""
CollectD plugin for collecting statistics about running containers via
Docker's remote API /<container>/stats endpoint.
"""
DEFAULT_BASE_URL = 'unix://var/run/docker.sock'
DEFAULT_DOCKER_TIMEOUT = 5
# The stats endpoint is only supported by API >= 1.17
MIN_DOCKER_API_VERSION = '1.17'
CLASSES = [NetworkStats, BlkioStats, CpuStats, MemoryStats]
def __init__(self, docker_url=None):
self.docker_url = docker_url or DockerPlugin.DEFAULT_BASE_URL
self.timeout = DockerPlugin.DEFAULT_DOCKER_TIMEOUT
self.capture = False
self.stats = {}
def configure_callback(self, conf):
for node in conf.children:
if node.key == 'BaseURL':
self.docker_url = node.values[0]
elif node.key == 'Timeout':
self.timeout = int(node.values[0])
def init_callback(self):
self.client = docker.Client(
base_url=self.docker_url,
version=DockerPlugin.MIN_DOCKER_API_VERSION)
self.client.timeout = self.timeout
# Check API version for stats endpoint support.
try:
version = self.client.version()['ApiVersion']
if StrictVersion(version) < \
StrictVersion(DockerPlugin.MIN_DOCKER_API_VERSION):
raise Exception
except:
collectd.warning(('Docker daemon at {url} does not '
'support container statistics!')
.format(url=self.docker_url))
return False
collectd.register_read(self.read_callback)
collectd.info(('Collecting stats about Docker containers from {url} '
'(API version {version}; timeout: {timeout}s).')
.format(url=self.docker_url,
version=version,
timeout=self.timeout))
return True
def read_callback(self):
containers = [c for c in self.client.containers()
if c['Status'].startswith('Up')]
# Terminate stats gathering threads for containers that are not running
# anymore.
for cid in set(self.stats) - set(map(lambda c: c['Id'], containers)):
self.stats[cid].stop = True
del self.stats[cid]
for container in containers:
try:
for name in container['Names']:
# Containers can be linked and the container name is not
# necessarly the first entry of the list
if not re.match("/.*/", name):
container['Name'] = name[1:]
# Start a stats gathering thread if the container is new.
if container['Id'] not in self.stats:
self.stats[container['Id']] = ContainerStats(container,
self.client)
# Get and process stats from the container.
stats = self.stats[container['Id']].stats
t = stats['read']
for klass in self.CLASSES:
klass.read(container, stats, t)
except Exception, e:
collectd.warning(('Error getting stats for container '
'{container}: {msg}')
.format(container=_c(container), msg=e))
# Command-line execution
if __name__ == '__main__':
    # Stand-alone mode: emulate just enough of the CollectD Python API so the
    # plugin can be exercised from a shell; output follows the collectd-exec
    # PUTVAL protocol.
    class ExecCollectdValues:
        def dispatch(self):
            # Mimics collectd.Values.dispatch() by printing a PUTVAL line.
            if not getattr(self, 'host', None):
                self.host = os.environ.get('COLLECTD_HOSTNAME', 'localhost')
            identifier = '%s/%s' % (self.host, self.plugin)
            if getattr(self, 'plugin_instance', None):
                identifier += '-' + self.plugin_instance
            identifier += '/' + self.type
            if getattr(self, 'type_instance', None):
                identifier += '-' + self.type_instance
            print 'PUTVAL', identifier, \
                ':'.join(map(str, [int(self.time)] + self.values))
    class ExecCollectd:
        # Minimal stub of the 'collectd' module surface used by the plugin.
        def Values(self):
            return ExecCollectdValues()
        def warning(self, msg):
            print 'WARNING:', msg
        def info(self, msg):
            print 'INFO:', msg
        def register_read(self, docker_plugin):
            # Stand-alone runs call read_callback() directly below.
            pass
    collectd = ExecCollectd()
    plugin = DockerPlugin()
    # An optional first command-line argument overrides the Docker URL.
    if len(sys.argv) > 1:
        plugin.docker_url = sys.argv[1]
    if plugin.init_callback():
        plugin.read_callback()
# Normal plugin execution via CollectD
else:
    import collectd
    plugin = DockerPlugin()
    collectd.register_config(plugin.configure_callback)
    collectd.register_init(plugin.init_callback)
| zsuzhengdu/docker-collectd-plugin | dockerplugin.py | Python | gpl-2.0 | 14,070 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import flask
import logging
from bson import ObjectId
from flask import current_app as app
from eve.utils import config
from superdesk.activity import add_activity, ACTIVITY_CREATE, ACTIVITY_UPDATE
from superdesk.metadata.item import SIGN_OFF
from superdesk.services import BaseService
from superdesk.utils import is_hashed, get_hash, compare_preferences
from superdesk import get_resource_service
from superdesk.emails import send_user_status_changed_email, send_activate_account_email
from superdesk.utc import utcnow
from superdesk.privilege import get_item_privilege_name, get_privilege_list
from superdesk.errors import SuperdeskApiError
from superdesk.users.errors import UserInactiveError, UserNotRegisteredException
from superdesk.notification import push_notification
from superdesk.validation import ValidationError
from superdesk.utils import ignorecase_query
logger = logging.getLogger(__name__)
def get_display_name(user):
    """Return the user's full name, falling back to the username.

    :param user: user document (dict)
    """
    if user.get("first_name") or user.get("last_name"):
        full_name = "{} {}".format(user.get("first_name", ""), user.get("last_name", ""))
        return full_name.strip()
    return user.get("username")
def is_admin(user):
    """Test if given user is admin.
    :param user
    """
    user_type = user.get("user_type", "user")
    return user_type == "administrator"
def get_admin_privileges():
    """Get privileges for admin user: every known privilege, enabled."""
    return {privilege["name"]: 1 for privilege in get_privilege_list()}
def get_privileges(user, role):
    """Get privileges for given user and role.

    Admins get everything; otherwise the user's own privileges override
    the ones inherited from the role.

    :param user
    :param role
    """
    if is_admin(user):
        return get_admin_privileges()
    if not role:
        return user.get("privileges", {})
    combined = dict(role.get("privileges", {}))
    combined.update(user.get("privileges", {}))
    return combined
def current_user_has_privilege(privilege):
    """Test if current user has given privilege.
    In case there is no current user we assume it's system (via worker/manage.py)
    and let it pass.
    :param privilege
    """
    current_user = getattr(flask.g, "user", None)
    if not current_user:  # no user - worker can do it
        return True
    current_role = getattr(flask.g, "role", None)
    return get_privileges(current_user, current_role).get(privilege, False)
def current_user_has_item_privilege(resource: str, item):
    """Check if current user has privilege for item."""
    privilege_name = get_item_privilege_name(resource, item)
    return current_user_has_privilege(privilege_name)
def is_sensitive_update(updates):
    """Test if given update is sensitive and might change user privileges."""
    return any(field in updates for field in ("role", "privileges", "user_type"))
def get_invisible_stages(user_id):
    """Return the stages not visible to the user, based on their desks."""
    desks = get_resource_service("user_desks").get(req=None, lookup={"user_id": user_id})
    desk_ids = [desk["_id"] for desk in desks]
    return get_resource_service("stages").get_stages_by_visibility(False, desk_ids)
def set_sign_off(user):
    """
    Set sign_off property on user if it's not set already.

    Resolution order when sign_off is missing or None:
    1. the field configured via ``SIGN_OFF_MAPPING``, when present on the user,
    2. an empty string when sign_off exists on the user but is ``None``,
    3. the first three letters of the username, upper-cased, when either
       name part is missing,
    4. otherwise the initials of first and last name.
    """
    if SIGN_OFF not in user or user[SIGN_OFF] is None:
        sign_off_mapping = app.config.get("SIGN_OFF_MAPPING", None)
        if sign_off_mapping and sign_off_mapping in user:
            user[SIGN_OFF] = user[sign_off_mapping]
        elif SIGN_OFF in user and user[SIGN_OFF] is None:
            user[SIGN_OFF] = ""
        elif "first_name" not in user or "last_name" not in user:
            user[SIGN_OFF] = user["username"][:3].upper()
        else:
            # NOTE(review): raises IndexError when first/last name is an empty
            # string — presumably validated upstream; confirm.
            user[SIGN_OFF] = "{first_name[0]}{last_name[0]}".format(**user)
def update_sign_off(updates):
    """
    Update sign_off property on user if the mapped field is changed.
    """
    mapped_field = app.config.get("SIGN_OFF_MAPPING", None)
    if not mapped_field:
        return
    if mapped_field in updates:
        updates[SIGN_OFF] = updates[mapped_field]
def get_sign_off(user):
    """
    Gets sign_off property on user, computing and storing it when missing.
    """
    if user.get(SIGN_OFF) is None:
        set_sign_off(user)
    return user[SIGN_OFF]
class UsersService(BaseService):
    """Service backing the users resource.

    Extends the base CRUD hooks to enforce user-management permission rules,
    maintain derived fields (display name, sign off, privileges), push
    notifications about changes, and clean up sessions/locks when a user is
    disabled.
    """

    # Toggled off during testing via stop_updating_stage_visibility().
    _updating_stage_visibility = True

    def __is_invalid_operation(self, user, updates, method):
        """Checks if the requested 'PATCH' or 'DELETE' operation is Invalid.
        Operation is invalid if one of the below is True:
        1. Check if the user is updating his/her own status.
        2. Check if the user is changing the role/user_type/privileges of other logged-in users.
        3. A user without 'User Management' privilege is changing status/role/user_type/privileges
        :return: error message if invalid.
        """
        if "user" in flask.g:
            if method == "PATCH":
                if "is_active" in updates or "is_enabled" in updates:
                    if str(user["_id"]) == str(flask.g.user["_id"]):
                        return "Not allowed to change your own status"
                    elif not current_user_has_privilege("users"):
                        return "Insufficient privileges to change user state"
                if (
                    str(user["_id"]) != str(flask.g.user["_id"])
                    and user.get("session_preferences")
                    and is_sensitive_update(updates)
                ):
                    return "Not allowed to change the role/user_type/privileges of a logged-in user"
            elif method == "DELETE" and str(user["_id"]) == str(flask.g.user["_id"]):
                return "Not allowed to disable your own profile."
        if method == "PATCH" and is_sensitive_update(updates) and not current_user_has_privilege("users"):
            return "Insufficient privileges to update role/user_type/privileges"

    def __handle_status_changed(self, updates, user):
        """Invalidate sessions and email the user when the is_enabled or
        is_active flags change."""
        enabled = updates.get("is_enabled", None)
        active = updates.get("is_active", None)
        if enabled is not None or active is not None:
            get_resource_service("auth").delete_action({"username": user.get("username")})  # remove active tokens
            updates["session_preferences"] = {}
            # send email notification
            can_send_mail = get_resource_service("preferences").email_notification_is_enabled(user_id=user["_id"])
            status = ""
            if enabled is not None:
                status = "enabled" if enabled else "disabled"
            if (status == "" or status == "enabled") and active is not None:
                status = "enabled and active" if active else "enabled but inactive"
            if can_send_mail:
                send_user_status_changed_email([user.get("email")], status)

    def __send_notification(self, updates, user):
        """Push a websocket notification matching the kind of change that was
        made to the user document."""
        user_id = user["_id"]
        if "is_enabled" in updates and not updates["is_enabled"]:
            push_notification("user_disabled", updated=1, user_id=str(user_id))
        elif "is_active" in updates and not updates["is_active"]:
            push_notification("user_inactivated", updated=1, user_id=str(user_id))
        elif "role" in updates:
            push_notification("user_role_changed", updated=1, user_id=str(user_id))
        elif "privileges" in updates:
            added, removed, modified = compare_preferences(user.get("privileges", {}), updates["privileges"])
            # A privilege flipped from 1 to 0 counts as a revocation.
            if len(removed) > 0 or (1, 0) in modified.values():
                push_notification("user_privileges_revoked", updated=1, user_id=str(user_id))
            if len(added) > 0:
                add_activity(
                    ACTIVITY_UPDATE,
                    "user {{user}} has been granted new privileges: Please re-login.",
                    self.datasource,
                    notify=[user_id],
                    user=user.get("display_name", user.get("username")),
                )
        elif "user_type" in updates:
            # NOTE: is_admin() is applied to the updates dict here, i.e. it
            # inspects the *new* user_type value.
            if not is_admin(updates):
                push_notification("user_type_changed", updated=1, user_id=str(user_id))
            else:
                add_activity(
                    ACTIVITY_UPDATE,
                    "user {{user}} is updated to administrator: Please re-login.",
                    self.datasource,
                    notify=[user_id],
                    user=user.get("display_name", user.get("username")),
                )
        else:
            push_notification("user", updated=1, user_id=str(user_id))

    def get_avatar_renditions(self, doc):
        """Return the stored renditions for an avatar upload id, if any."""
        renditions = get_resource_service("upload").find_one(req=None, _id=doc)
        return renditions.get("renditions") if renditions is not None else None

    def on_create(self, docs):
        """Fill in defaults (password timestamp, display name, sign off, role,
        avatar renditions) and initial preferences for new users."""
        for user_doc in docs:
            user_doc.setdefault("password_changed_on", utcnow())
            user_doc.setdefault("display_name", get_display_name(user_doc))
            user_doc.setdefault(SIGN_OFF, set_sign_off(user_doc))
            user_doc.setdefault("role", get_resource_service("roles").get_default_role_id())
            if user_doc.get("avatar"):
                user_doc.setdefault("avatar_renditions", self.get_avatar_renditions(user_doc["avatar"]))
            get_resource_service("preferences").set_user_initial_prefs(user_doc)

    def on_created(self, docs):
        """Record creation activity and compute stage visibility for each
        newly created user."""
        for user_doc in docs:
            self.__update_user_defaults(user_doc)
            add_activity(
                ACTIVITY_CREATE,
                "created user {{user}}",
                self.datasource,
                user=user_doc.get("display_name", user_doc.get("username")),
            )
            self.update_stage_visibility_for_user(user_doc)

    def on_update(self, updates, original):
        """Overriding the method to:
        1. Prevent user from the below:
        a. Check if the user is updating his/her own status.
        b. Check if the user is changing the status of other logged-in users.
        c. A user without 'User Management' privilege is changing role/user_type/privileges
        2. Set Sign Off property if it's not been set already
        """
        error_message = self.__is_invalid_operation(original, updates, "PATCH")
        if error_message:
            raise SuperdeskApiError.forbiddenError(message=error_message)
        if updates.get("is_enabled", False):
            # Re-enabling a user implies activating them as well.
            updates["is_active"] = True
        update_sign_off(updates)
        if updates.get("avatar"):
            updates["avatar_renditions"] = self.get_avatar_renditions(updates["avatar"])

    def on_updated(self, updates, user):
        """Propagate preference changes and send notifications after a user
        document was updated."""
        if "role" in updates or "privileges" in updates:
            get_resource_service("preferences").on_update(updates, user)
        self.__handle_status_changed(updates, user)
        self.__send_notification(updates, user)

    def on_delete(self, user):
        """Overriding the method to prevent user from the below:
        1. Check if the user is updating his/her own status.
        2. Check if the user is changing the status of other logged-in users.
        3. A user without 'User Management' privilege is changing role/user_type/privileges
        """
        updates = {"is_enabled": False, "is_active": False}
        error_message = self.__is_invalid_operation(user, updates, "DELETE")
        if error_message:
            raise SuperdeskApiError.forbiddenError(message=error_message)

    def delete(self, lookup):
        """
        Overriding the method to prevent from hard delete
        """
        # A "delete" only disables and deactivates the user document.
        user = super().find_one(req=None, _id=str(lookup["_id"]))
        return super().update(
            id=ObjectId(lookup["_id"]), updates={"is_enabled": False, "is_active": False}, original=user
        )

    def __clear_locked_items(self, user_id):
        """Unlock (or remove, when never saved) all items locked by the given
        user, and drop their autosaves."""
        archive_service = get_resource_service("archive")
        archive_autosave_service = get_resource_service("archive_autosave")
        doc_to_unlock = {
            "lock_user": None,
            "lock_session": None,
            "lock_time": None,
            "lock_action": None,
            "force_unlock": True,
        }
        user = ObjectId(user_id) if isinstance(user_id, str) else user_id
        query = {"$or": [{"lock_user": user}, {"task.user": user, "task.desk": {"$exists": False}}]}
        items_locked_by_user = archive_service.get_from_mongo(req=None, lookup=query)
        if items_locked_by_user and items_locked_by_user.count():
            for item in items_locked_by_user:
                # delete the item if nothing is saved so far
                if item[config.VERSION] == 0 and item["state"] == "draft":
                    get_resource_service("archive").delete(lookup={"_id": item["_id"]})
                else:
                    archive_service.update(item["_id"], doc_to_unlock, item)
                archive_autosave_service.delete(lookup={"_id": item["_id"]})

    def on_deleted(self, doc):
        """Overriding to add to activity stream and handle user clean up.
        1. Authenticated Sessions
        2. Locked Articles
        3. Reset Password Tokens
        """
        add_activity(
            ACTIVITY_UPDATE,
            "disabled user {{user}}",
            self.datasource,
            user=doc.get("display_name", doc.get("username")),
        )
        self.__clear_locked_items(str(doc["_id"]))
        self.__handle_status_changed(updates={"is_enabled": False, "is_active": False}, user=doc)

    def on_fetched(self, document):
        """Apply display defaults to every user in a list response."""
        for doc in document["_items"]:
            self.__update_user_defaults(doc)

    def on_fetched_item(self, doc):
        """Apply display defaults to a single fetched user."""
        self.__update_user_defaults(doc)

    def __update_user_defaults(self, doc):
        """Set default fields for users"""
        # Never expose the (hashed) password to API consumers.
        doc.pop("password", None)
        doc.setdefault("display_name", get_display_name(doc))
        doc.setdefault("is_enabled", doc.get("is_active"))
        doc.setdefault(SIGN_OFF, set_sign_off(doc))
        doc["dateline_source"] = app.config["ORGANIZATION_NAME_ABBREVIATION"]

    def user_is_waiting_activation(self, doc):
        """Tell whether the user still has to activate their account."""
        return doc.get("needs_activation", False)

    def is_user_active(self, doc):
        """Tell whether the user account is active."""
        return doc.get("is_active", False)

    def get_role(self, user):
        """Return the role document assigned to the user, or None."""
        if user:
            role_id = user.get("role", None)
            if role_id:
                return get_resource_service("roles").find_one(_id=role_id, req=None)
        return None

    def set_privileges(self, user, role):
        """Compute and store the user's effective privileges."""
        user["active_privileges"] = get_privileges(user, role)

    def get(self, req, lookup):
        """List users, optionally filtered by authorship, hiding inactive and
        disabled users unless explicitly requested via query args."""
        try:
            is_author = req.args["is_author"]
        except (AttributeError, TypeError, KeyError):
            pass
        else:
            if is_author in ("0", "1"):
                lookup["is_author"] = bool(int(is_author))
            else:
                logger.warn("bad value of is_author argument ({value})".format(value=is_author))
        """filtering out inactive users and disabled users"""
        args = req.args if req and req.args else {}
        # Filtering inactive users
        if not args.get("show_inactive"):
            if lookup is not None:
                lookup["is_active"] = True
            else:
                lookup = {"is_active": True}
        # Filtering disabled users
        if not args.get("show_disabled"):
            if lookup is not None:
                lookup["is_enabled"] = True
            else:
                lookup = {"is_enabled": True}
        return super().get(req, lookup)

    def get_users_by_user_type(self, user_type="user"):
        """Return all users of the given user type."""
        return list(self.get(req=None, lookup={"user_type": user_type}))

    def get_users_by_role(self, role_id):
        """Return all users assigned the given role."""
        return list(self.get(req=None, lookup={"role": role_id}))

    def get_invisible_stages(self, user_id):
        """Return the stages invisible to the user (empty without a user)."""
        return get_invisible_stages(user_id) if user_id else []

    def get_invisible_stages_ids(self, user_id):
        """Return the ids (as strings) of stages invisible to the user."""
        return [str(stage["_id"]) for stage in self.get_invisible_stages(user_id)]

    def get_user_by_email(self, email_address):
        """Finds a user by the given email_address.
        Does a exact match.
        :param email_address:
        :type email_address: str with valid email format
        :return: user object if found.
        :rtype: dict having user details :py:class: `superdesk.users.users.UsersResource`
        :raises: UserNotRegisteredException if no user found with the given email address.
        """
        user = self.find_one(req=None, email=email_address)
        if not user:
            raise UserNotRegisteredException("No user registered with email %s" % email_address)
        return user

    def update_stage_visibility_for_users(self):
        """Recompute invisible stages for every user."""
        if not self._updating_stage_visibility:
            return
        logger.info("Updating Stage Visibility Started")
        users = list(get_resource_service("users").get(req=None, lookup=None))
        for user in users:
            self.update_stage_visibility_for_user(user)
        logger.info("Updating Stage Visibility Completed")

    def update_stage_visibility_for_user(self, user):
        """Recompute and persist the invisible stages for one user."""
        if not self._updating_stage_visibility:
            return
        try:
            logger.info("Updating Stage Visibility for user {}.".format(user.get(config.ID_FIELD)))
            stages = self.get_invisible_stages_ids(user.get(config.ID_FIELD))
            self.system_update(user.get(config.ID_FIELD), {"invisible_stages": stages}, user)
            user["invisible_stages"] = stages
            logger.info("Updated Stage Visibility for user {}.".format(user.get(config.ID_FIELD)))
        except Exception:
            logger.exception("Failed to update the stage visibility " "for user: {}".format(user.get(config.ID_FIELD)))

    def stop_updating_stage_visibility(self):
        """Disable stage visibility updates (testing only)."""
        if not app.config.get("SUPERDESK_TESTING"):
            raise RuntimeError("Only allowed during testing")
        self._updating_stage_visibility = False

    def start_updating_stage_visibility(self):
        """Re-enable stage visibility updates."""
        self._updating_stage_visibility = True
class DBUsersService(UsersService):
    """
    Service class for UsersResource and should be used when AD is inactive.

    Adds password hashing, account activation emails and mapping of
    externally provided (SSO) user data on top of UsersService.
    """

    def on_create(self, docs):
        """Hash plain-text passwords before new users are stored."""
        super().on_create(docs)
        for doc in docs:
            if doc.get("password", None) and not is_hashed(doc.get("password")):
                doc["password"] = get_hash(doc.get("password"), app.config.get("BCRYPT_GENSALT_WORK_FACTOR", 12))

    def on_created(self, docs):
        """Send email to user with reset password token."""
        super().on_created(docs)
        resetService = get_resource_service("reset_user_password")
        activate_ttl = app.config["ACTIVATE_ACCOUNT_TOKEN_TIME_TO_LIVE"]
        for doc in docs:
            if self.user_is_waiting_activation(doc):
                tokenDoc = {"user": doc["_id"], "email": doc["email"]}
                id = resetService.store_reset_password_token(tokenDoc, doc["email"], activate_ttl, doc["_id"])
                if not id:
                    raise SuperdeskApiError.internalError("Failed to send account activation email.")
                tokenDoc.update({"username": doc["username"]})
                send_activate_account_email(tokenDoc, activate_ttl)

    def on_update(self, updates, user):
        """Keep display_name in sync when first or last name changes."""
        super().on_update(updates, user)
        if updates.get("first_name") or updates.get("last_name"):
            # Merge the incoming name parts with the stored ones so the
            # display name reflects the final state of the document.
            updated_user = {
                "first_name": user.get("first_name", ""),
                "last_name": user.get("last_name", ""),
                "username": user.get("username", ""),
            }
            if updates.get("first_name"):
                updated_user["first_name"] = updates.get("first_name")
            if updates.get("last_name"):
                updated_user["last_name"] = updates.get("last_name")
            updates["display_name"] = get_display_name(updated_user)

    def update_password(self, user_id, password):
        """Update the user password.
        Returns true if successful.

        :raises SuperdeskApiError: when the user does not exist.
        :raises UserInactiveError: when the user is not active.
        """
        user = self.find_one(req=None, _id=user_id)
        if not user:
            raise SuperdeskApiError.unauthorizedError("User not found")
        if not self.is_user_active(user):
            raise UserInactiveError()
        updates = {
            "password": get_hash(password, app.config.get("BCRYPT_GENSALT_WORK_FACTOR", 12)),
            "password_changed_on": utcnow(),
            app.config["LAST_UPDATED"]: utcnow(),
        }
        if self.user_is_waiting_activation(user):
            # Setting a password completes the account activation.
            updates["needs_activation"] = False
        self.patch(user_id, updates=updates)

    def on_deleted(self, doc):
        """
        Overriding clean up reset password tokens:
        """
        super().on_deleted(doc)
        get_resource_service("reset_user_password").remove_all_tokens_for_email(doc.get("email"))

    def _process_external_data(self, _data, update=False):
        """Map external (SSO) user data onto internal fields.

        Resolves role and desk names to ids, strips fields that must not
        change on update, normalizes the username and validates the result.
        """
        data = _data.copy()
        if data.get("role"):
            role_name = data.pop("role")
            role = get_resource_service("roles").find_one(req=None, name=ignorecase_query(role_name))
            if role:
                data["role"] = role["_id"]
        if not update and (data.get("desk") or app.config.get("USER_EXTERNAL_DESK")):
            desk_name = data.pop("desk", None) or app.config.get("USER_EXTERNAL_DESK")
            desk = get_resource_service("desks").find_one(req=None, name=ignorecase_query(desk_name))
            if desk:
                data["desk"] = desk["_id"]
        data["needs_activation"] = False
        if update:
            data.pop("desk", None)
            data.pop("email", None)
            data.pop("username", None)
        elif data.get("username"):
            if app.config.get("USER_EXTERNAL_USERNAME_STRIP_DOMAIN"):
                data["username"] = data["username"].split("@")[0]
            data["username"] = data["username"].replace("@", ".")  # @ breaks mentioning
        validator = self._validator()
        if not validator.validate(data, update=update):
            raise ValidationError(validator.errors)
        return validator.normalized(data) if not update else data

    def create_external_user(self, data):
        """Create a user from external data and add them to their desk."""
        docs = [self._process_external_data(data)]
        self.on_create(docs)
        self.create(docs)
        for user in docs:
            if user.get("desk"):
                get_resource_service("desks").add_member(user["desk"], user["_id"])
        return docs[0]

    def update_external_user(self, _id, data):
        """Update an existing user from external data."""
        orig = self.find_one(req=None, _id=ObjectId(_id))
        updates = self._process_external_data(data, update=True)
        self.system_update(ObjectId(_id), updates, orig)
| superdesk/superdesk-core | superdesk/users/services.py | Python | agpl-3.0 | 22,920 |
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""class for tests in ceilometer/alarm/evaluator/__init__.py
"""
import datetime
import mock
from oslo_utils import timeutils
from oslotest import base
from ceilometer.alarm import evaluator
class TestEvaluatorBaseClass(base.BaseTestCase):
    """Tests for the alarm Evaluator base class: notifier error handling in
    _refresh() and cron-style time constraint matching."""

    def setUp(self):
        super(TestEvaluatorBaseClass, self).setUp()
        self.called = False

    def _notify(self, alarm, previous, reason, details):
        # Stand-in notifier that records the call and then raises, so the
        # refresh test can check that notifier failures are contained.
        self.called = True
        raise Exception('Boom!')

    def test_base_refresh(self):
        # _refresh() must invoke the notifier, and the notifier's exception
        # must not propagate out of _refresh().
        notifier = mock.MagicMock()
        notifier.notify = self._notify
        class EvaluatorSub(evaluator.Evaluator):
            def evaluate(self, alarm):
                pass
        ev = EvaluatorSub(notifier)
        ev.api_client = mock.MagicMock()
        ev._refresh(mock.MagicMock(), mock.MagicMock(),
                    mock.MagicMock(), mock.MagicMock())
        self.assertTrue(self.called)

    @mock.patch.object(timeutils, 'utcnow')
    def test_base_time_constraints(self, mock_utcnow):
        alarm = mock.MagicMock()
        alarm.time_constraints = [
            {'name': 'test',
             'description': 'test',
             'start': '0 11 * * *',  # daily at 11:00
             'duration': 10800,  # 3 hours
             'timezone': ''},
            {'name': 'test2',
             'description': 'test',
             'start': '0 23 * * *',  # daily at 23:00
             'duration': 10800,  # 3 hours
             'timezone': ''},
        ]
        cls = evaluator.Evaluator
        # 12:00 falls within the 11:00-14:00 window.
        mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        # 01:00 falls within the 23:00-02:00 window of the second constraint.
        mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        # 05:00 is outside both windows.
        mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0)
        self.assertFalse(cls.within_time_constraint(alarm))

    @mock.patch.object(timeutils, 'utcnow')
    def test_base_time_constraints_by_month(self, mock_utcnow):
        alarm = mock.MagicMock()
        alarm.time_constraints = [
            {'name': 'test',
             'description': 'test',
             'start': '0 11 31 1,3,5,7,8,10,12 *',  # every 31st at 11:00
             'duration': 10800,  # 3 hours
             'timezone': ''},
        ]
        cls = evaluator.Evaluator
        # March 31st, 11:30 is within the 11:00-14:00 window.
        mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0)
        self.assertTrue(cls.within_time_constraint(alarm))

    @mock.patch.object(timeutils, 'utcnow')
    def test_base_time_constraints_complex(self, mock_utcnow):
        alarm = mock.MagicMock()
        alarm.time_constraints = [
            {'name': 'test',
             'description': 'test',
             # Every consecutive 2 minutes (from the 3rd to the 57th) past
             # every consecutive 2 hours (between 3:00 and 12:59) on every day.
             'start': '3-57/2 3-12/2 * * *',
             'duration': 30,
             'timezone': ''}
        ]
        cls = evaluator.Evaluator
        # test minutes inside
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        # test minutes outside
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0)
        self.assertFalse(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0)
        self.assertFalse(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0)
        self.assertFalse(cls.within_time_constraint(alarm))
        # test hours inside
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0)
        self.assertTrue(cls.within_time_constraint(alarm))
        # test hours outside
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0)
        self.assertFalse(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0)
        self.assertFalse(cls.within_time_constraint(alarm))
        mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0)
        self.assertFalse(cls.within_time_constraint(alarm))

    @mock.patch.object(timeutils, 'utcnow')
    def test_base_time_constraints_timezone(self, mock_utcnow):
        alarm = mock.MagicMock()
        cls = evaluator.Evaluator
        mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0)
        # 11:00 UTC is 12:00 in Ljubljana (UTC+1 in January): inside window.
        alarm.time_constraints = [
            {'name': 'test',
             'description': 'test',
             'start': '0 11 * * *',  # daily at 11:00
             'duration': 10800,  # 3 hours
             'timezone': 'Europe/Ljubljana'}
        ]
        self.assertTrue(cls.within_time_constraint(alarm))
        # 11:00 UTC is 06:00 US/Eastern: outside the 11:00-14:00 window.
        alarm.time_constraints = [
            {'name': 'test2',
             'description': 'test2',
             'start': '0 11 * * *',  # daily at 11:00
             'duration': 10800,  # 3 hours
             'timezone': 'US/Eastern'}
        ]
        self.assertFalse(cls.within_time_constraint(alarm))
| cernops/ceilometer | ceilometer/tests/unit/alarm/evaluator/test_base.py | Python | apache-2.0 | 6,203 |
SIZE = 16
CAPTURE_CHAR = 0x3C  # K28.1
SOP_CHAR = 0xBC  # K28.5


def pattern(link):
    """Build the SIZE-word test pattern for the given link.

    Bits 16+ of each word carry the word index; word 0 carries the capture
    character in its low byte and every other 4th word is a fixed marker.
    """
    base_pattern = ((link + 1) << 8)
    # per imperial grps request
    base_pattern = 0

    words = []
    for index in range(SIZE):
        word = base_pattern | (index << 16)
        if index == 0:
            word = (word & 0xFFFFFF00) | CAPTURE_CHAR
        elif index % 4 == 0:
            word = 0x505050bc
        words.append(word)
    return words
| efarres/GoIPbus | cactuscore/softipbus/scripts/integration_patterns.py | Python | gpl-2.0 | 479 |
/////snippets
cd /home/common/shade
python3 manage.py makemigrations && python3 manage.py migrate
cd /home/common/shade
python3 manage.py shell
from primus.ostium import *
from primus.models import *
cd /home/common/shade
git add --all && git commit --all -m ''
git push shade_app master
( , '' ) ,
#####################################################################
# Classe de Template
#####################################################################
class Template(models.Model):
    '''
    Copy/paste scaffold for new geographic models in this app.

    NOTE(review): the field declarations below are placeholders, not valid
    Django fields as written (``models.CharField(Template)`` and
    ``gismodels.Template(srid = 3857)`` must be replaced with real field
    classes and arguments when this scaffold is instantiated).
    '''
    # TODO
    # Variables for the pre-filled choice lists
    # Attributes
    gid = models.AutoField(primary_key = True , db_index = True)  # surrogate primary key
    name = models.CharField(Template)  # placeholder: replace with real CharField args
    geom = gismodels.Template(srid = 3857)  # placeholder geometry field; SRID 3857 (Web Mercator)
    # Methods
    def __str__(self):
        # Human-readable label (used e.g. by the Django admin).
        return str(self.name)
1 Empire du Roi-Lune
2 Lagashein
3 Lombrie
4 Ostrie
5 Pays clémentin
6 Ravénie
7 Thémésie
# 8 Nécris (pas un pays, ville franche)
Correspondance pays <-> origin_choice
origin_dict = {1:3, 2:2, 3:1, 4:2, 5:1, 6:1, 7:2}
Capitales :
1 Ishmer
2 Orffstein
3 Trevoletta , Orphia , Montenero
4 Lisselberg
5 Clémence
6 Agostina
7 Thémée
Code pays | Nom pays | Code origine | Préfixe
1 | E. Roi-Lune | 3 | moonking_
2 | Lagashein | 2 | ostrian_
3 | Lombrie | 1 | clementine_
4 | Ostrie | 2 | ostrian_
5 | P. clémentin | 1 | clementine_
6 | Ravénie | 1 | clementine_
7 | Thémésie | 2 | ostrian_
Agréments / préfixes / Suffixes
di
del
san
sant
santa
del
della
kwargs = {}
kwargs['genre'] = 2
kwargs['origin'] = 1
kwargs['name'] = 'John'
obj_a_creer = FirstName(**kwargs)
obj_a_creer.save()
# Grid
terrain_choices = ( ( 1 , 'Colline' ) ,
( 2 , 'Désert' ) ,
( 3 , 'Forêt' ) ,
( 4 , 'Littoral' ) ,
( 5 , 'Marais' ) ,
( 6 , 'Mer intérieure' ) ,
( 7 , 'Montagne' ) ,
( 8 , 'Océan' ) ,
( 9 , 'Savane' ) ,
( 10, 'Plaine' ) )
# FirstName
genre_choice = (
(1 , 'femme') ,
(2 , 'homme') )
# FirstName LastName
origin_choice = (
(1 , 'Pays clémentin , Ravénie , Lombrie' ) ,
(2 , 'Ostrie, Thémésie, Lagashein' ) ,
(3 , 'Empire du Roi-Lune' ) )
# Grid LastName
allegiance_choices = ( ( 1 , 'Empire du Roi-Lune' ) ,
( 2 , 'Lagashein' ) ,
( 3 , 'Lombrie' ) ,
( 4 , 'Ostrie' ) ,
( 5 , 'Pays clémentin' ) ,
( 6 , 'Ravénie' ) ,
( 7 , 'Thémésie' ) )
region_choices = (#WIP
)
# Town
category_choice = (
( 1 , 'Capitale' ) ,
( 2 , 'Cité' ) ,
( 3 , 'Village' ) ,
( 4 , 'Fort' ) ,
( 5 , 'Fortin' ) )
# Street
type_street_choice = (
( 1 , 'Avenue' ) ,
( 2 , 'Rue' ) ,
( 3 , 'Ruelle' ) ,
( 4 , 'Pont' ) ,
( 5 , 'Ponton' ) )
# Path
type_path_choices = (
( 1 , 'Route' ) ,
( 2 , 'Chemin' ) ,
( 3 , 'Sentier' ) )
# Archetype
beauty_choice = (
( 0 , 'Grande laideur') ,
( 1 , 'Classique' ) ,
( 2 , 'Grande Beauté' ) )
cast_choice = (
( 1 , 'Gueux' ) ,
( 2 , 'Pègre' ) ,
( 3 , 'Popolo minuto' ) ,
( 4 , 'Popolo grasso' ) ,
( 5 , 'Clergé' ) ,
( 6 , 'Haut clergé' ) ,
( 7 , 'Haute bourgeoisie' ) ,
( 8 , 'Noblesse d\'épée' ) ,
( 9 , 'Noblesse de lettre' ) )
category_choice = (
( 1 , 'Basique' ) ,
( 2 , 'Vétéran' ) ,
( 3 , 'Elite' ) )
# Building
category_choices = (
( 1 , 'Religieux' ) ,
( 2 , 'Militaire' ) ,
( 3 , 'Commercial') ,
( 4 , 'Bas-fonds' ) ,
( 5 , 'Privé' ) )
subcategory_choices = (
( 11 , 'Cathédrale' ) ,
( 12 , 'Eglise' ) ,
( 13 , 'Chapelle' ) ,
( 14 , 'Temple' ) ,
( 15 , 'Couvent' ) ,
( 16 , 'Autre (religieux)' ) ,
( 21 , 'Caserne' ) ,
( 22 , 'Réserve' ) ,
( 23 , 'Armurerie' ) ,
( 24 , 'Dortoir' ) ,
( 25 , 'Bastion' ) ,
( 26 , 'Tour' ) ,
( 27 , 'Autre (militaire)' ) ,
( 31 , 'Entrepot' ) ,
( 32 , 'Comptoir' ) ,
( 33 , 'Bureau de change' ) ,
( 34 , 'Boutique' ) ,
( 35 , 'Taverne' ) ,
( 36 , 'Autre (commercial)' ) ,
( 41 , 'Planque' ) ,
( 42 , 'Refuge' ) ,
( 43 , 'Lieu d\'assemblée' ) ,
( 44 , 'Autre (bas-fonds)' ) ,
( 51 , 'Palazzo' ) ,
( 52 , 'Manoir' ) ,
( 53 , 'Maison bourgeoise' ) ,
( 54 , 'Maison modeste' ) ,
( 55 , 'Ruine' ) ,
( 56 , 'Autre (privé)' ) )
deity_choices = (
( 1 , 'Thémésia' ) ,
( 2 , 'Ohmédia' ) ,
( 3 , 'Candélia' ) ,
( 4 , 'Sélène' ) ,
( 5 , 'Inconnu' ) ) | sighill/shade_app | primus/useful_snippets.py | Python | mit | 5,269 |
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import webapp2
import jinja2
import os
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader("templates"))
class BidderHandler(webapp2.RequestHandler):
    """Serve the bidder page rendered from the ``bidder.html`` template."""

    def get(self):
        # The template takes no context; render it and stream the result.
        page = jinja_environment.get_template('bidder.html')
        self.response.out.write(page.render())
| abortz/saycbridge | dist/gae/handlers/bidder_handler.py | Python | bsd-3-clause | 447 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for ``facebook_posts``.

    Creates the ``Comment`` model, drops the old ``Post.likes`` /
    ``Post.comments`` JSON columns in favour of ``likes_json`` /
    ``comments_json`` plus ``*_real_count`` integer counters, and adds a
    proper M2M ``likes`` table between Post and facebook_users.User.
    """

    def forwards(self, orm):
        """Apply the schema changes (Comment table, Post column rework)."""
        # Adding model 'Comment'
        db.create_table('facebook_posts_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('graph_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['facebook_posts.Post'])),
            ('author_json', self.gf('annoying.fields.JSONField')(null=True)),
            ('author_content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='facebook_comments', null=True, to=orm['contenttypes.ContentType'])),
            ('author_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('created_time', self.gf('django.db.models.fields.DateTimeField')()),
            ('likes_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('can_remove', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('user_likes', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('facebook_posts', ['Comment'])

        # Deleting field 'Post.likes'
        db.delete_column('facebook_posts_post', 'likes')

        # Deleting field 'Post.comments'
        db.delete_column('facebook_posts_post', 'comments')

        # Adding field 'Post.likes_json'
        db.add_column('facebook_posts_post', 'likes_json',
                      self.gf('annoying.fields.JSONField')(null=True),
                      keep_default=False)

        # Adding field 'Post.comments_json'
        db.add_column('facebook_posts_post', 'comments_json',
                      self.gf('annoying.fields.JSONField')(null=True),
                      keep_default=False)

        # Adding field 'Post.likes_real_count'
        db.add_column('facebook_posts_post', 'likes_real_count',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Post.comments_real_count'
        db.add_column('facebook_posts_post', 'comments_real_count',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

        # Adding M2M table for field likes on 'Post'
        db.create_table('facebook_posts_post_likes', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm['facebook_posts.post'], null=False)),
            ('user', models.ForeignKey(orm['facebook_users.user'], null=False))
        ))
        db.create_unique('facebook_posts_post_likes', ['post_id', 'user_id'])

    def backwards(self, orm):
        """Revert the schema changes applied by :meth:`forwards`."""
        # Deleting model 'Comment'
        db.delete_table('facebook_posts_comment')

        # Adding field 'Post.likes'
        db.add_column('facebook_posts_post', 'likes',
                      self.gf('annoying.fields.JSONField')(null=True),
                      keep_default=False)

        # Adding field 'Post.comments'
        db.add_column('facebook_posts_post', 'comments',
                      self.gf('annoying.fields.JSONField')(null=True),
                      keep_default=False)

        # Deleting field 'Post.likes_json'
        db.delete_column('facebook_posts_post', 'likes_json')

        # Deleting field 'Post.comments_json'
        db.delete_column('facebook_posts_post', 'comments_json')

        # Deleting field 'Post.likes_real_count'
        db.delete_column('facebook_posts_post', 'likes_real_count')

        # Deleting field 'Post.comments_real_count'
        db.delete_column('facebook_posts_post', 'comments_real_count')

        # Removing M2M table for field likes on 'Post'
        db.delete_table('facebook_posts_post_likes')

    # Frozen ORM snapshot (South machinery) describing the models as of this
    # migration; auto-generated, do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook_applications.application': {
            'Meta': {'ordering': "['name']", 'object_name': 'Application'},
            'graph_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'namespace': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'facebook_posts.comment': {
            'Meta': {'ordering': "['-created_time']", 'object_name': 'Comment'},
            'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'facebook_comments'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'author_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'author_json': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'can_remove': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_time': ('django.db.models.fields.DateTimeField', [], {}),
            'graph_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['facebook_posts.Post']"}),
            'user_likes': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'facebook_posts.post': {
            'Meta': {'ordering': "['-created_time']", 'object_name': 'Post'},
            'actions': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'null': 'True', 'to': "orm['facebook_applications.Application']"}),
            'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'facebook_posts'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'author_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'author_json': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'caption': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'comments_json': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'comments_real_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'created_time': ('django.db.models.fields.DateTimeField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'graph_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'icon': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'likes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['facebook_users.User']", 'symmetrical': 'False'}),
            'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'likes_json': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'likes_real_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'message_tags': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'object_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
            'owners_json': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'picture': ('django.db.models.fields.TextField', [], {}),
            'place': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'privacy': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'properties': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '500'}),
            'status_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'story': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'story_tags': ('annoying.fields.JSONField', [], {'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_time': ('django.db.models.fields.DateTimeField', [], {}),
            'with_tags': ('annoying.fields.JSONField', [], {'null': 'True'})
        },
        'facebook_posts.postowner': {
            'Meta': {'ordering': "('post',)", 'unique_together': "(('post', 'owner_content_type', 'owner_id'),)", 'object_name': 'PostOwner'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'facebook_page_posts'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owners'", 'to': "orm['facebook_posts.Post']"})
        },
        'facebook_users.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User'},
            'graph_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
        }
    }

    # Apps whose frozen models above are considered complete by South.
    complete_apps = ['facebook_posts']
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsblendmodes.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Nyall Dawson, Massimo Endrighi
Email : nyall dot dawson at gmail.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Nyall Dawson, Massimo Endrighi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.PyQt.QtCore import QSize
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.core import (QgsVectorLayer,
QgsVectorSimplifyMethod,
QgsMapLayerRegistry,
QgsMultiRenderChecker,
QgsRasterLayer,
QgsMultiBandColorRenderer,
QgsRectangle,
QgsMapSettings
)
from qgis.testing import start_app, unittest
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
TEST_DATA_DIR = unitTestDataPath()
class TestQgsBlendModes(unittest.TestCase):
    """Rendering tests for layer/feature blend modes and layer transparency.

    Each test configures blending on the shared layers, renders via
    QgsMultiRenderChecker against a control image, and restores the layers
    to their defaults so tests do not leak state into each other.
    """

    def __init__(self, methodName):
        """Set up shared layers (runs per test, when the case is instantiated)."""
        unittest.TestCase.__init__(self, methodName)
        self.iface = get_iface()

        # initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
        self.mMapRegistry = QgsMapLayerRegistry.instance()

        # create point layer
        myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
        self.mPointLayer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
        self.mMapRegistry.addMapLayer(self.mPointLayer)

        # disable simplification so rendering is deterministic across scales
        self.mSimplifyMethod = QgsVectorSimplifyMethod()
        self.mSimplifyMethod.setSimplifyHints(QgsVectorSimplifyMethod.NoSimplification)

        # create polygon layer
        myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
        self.mPolygonLayer = QgsVectorLayer(myShpFile, 'Polygons', 'ogr')
        self.mPolygonLayer.setSimplifyMethod(self.mSimplifyMethod)
        self.mMapRegistry.addMapLayer(self.mPolygonLayer)

        # create line layer
        myShpFile = os.path.join(TEST_DATA_DIR, 'lines.shp')
        self.mLineLayer = QgsVectorLayer(myShpFile, 'Lines', 'ogr')
        self.mLineLayer.setSimplifyMethod(self.mSimplifyMethod)
        self.mMapRegistry.addMapLayer(self.mLineLayer)

        # create two raster layers from the same source
        myRasterFile = os.path.join(TEST_DATA_DIR, 'rgb256x256.png')
        self.mRasterLayer1 = QgsRasterLayer(myRasterFile, "raster1")
        self.mRasterLayer2 = QgsRasterLayer(myRasterFile, "raster2")
        myMultiBandRenderer1 = QgsMultiBandColorRenderer(self.mRasterLayer1.dataProvider(), 1, 2, 3)
        self.mRasterLayer1.setRenderer(myMultiBandRenderer1)
        self.mMapRegistry.addMapLayer(self.mRasterLayer1)
        myMultiBandRenderer2 = QgsMultiBandColorRenderer(self.mRasterLayer2.dataProvider(), 1, 2, 3)
        self.mRasterLayer2.setRenderer(myMultiBandRenderer2)
        self.mMapRegistry.addMapLayer(self.mRasterLayer2)

        # to match blend modes test comparisons background
        self.mapSettings = QgsMapSettings()
        self.mapSettings.setLayers([self.mRasterLayer1.id(), self.mRasterLayer2.id()])
        self.mapSettings.setBackgroundColor(QColor(152, 219, 249))
        self.mapSettings.setOutputSize(QSize(400, 400))
        self.mapSettings.setOutputDpi(96)

        self.extent = QgsRectangle(-118.8888888888887720, 22.8002070393376783, -83.3333333333331581, 46.8719806763287536)

    def testVectorBlending(self):
        """Test that blend modes work for vector layers."""
        # Add vector layers to map
        myLayers = []
        myLayers.append(self.mLineLayer.id())
        myLayers.append(self.mPolygonLayer.id())
        self.mapSettings.setLayers(myLayers)
        self.mapSettings.setExtent(self.extent)

        # Set blending modes for both layers
        self.mLineLayer.setBlendMode(QPainter.CompositionMode_Difference)
        self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_Difference)

        checker = QgsMultiRenderChecker()
        checker.setControlName("expected_vector_blendmodes")
        checker.setMapSettings(self.mapSettings)
        checker.setColorTolerance(1)

        myResult = checker.runTest("vector_blendmodes", 20)
        myMessage = ('vector blending failed')
        assert myResult, myMessage

        # Reset layers
        self.mLineLayer.setBlendMode(QPainter.CompositionMode_SourceOver)
        self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_SourceOver)

    def testVectorFeatureBlending(self):
        """Test that feature blend modes work for vector layers."""
        # Add vector layers to map
        myLayers = []
        myLayers.append(self.mLineLayer.id())
        myLayers.append(self.mPolygonLayer.id())
        self.mapSettings.setLayers(myLayers)
        self.mapSettings.setExtent(self.extent)

        # Set feature blending for line layer
        self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_Plus)

        checker = QgsMultiRenderChecker()
        checker.setControlName("expected_vector_featureblendmodes")
        checker.setMapSettings(self.mapSettings)
        checker.setColorTolerance(1)

        myResult = checker.runTest("vector_featureblendmodes", 20)
        myMessage = ('vector feature blending failed')
        assert myResult, myMessage

        # Reset layers
        self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_SourceOver)

    def testVectorLayerTransparency(self):
        """Test that layer transparency works for vector layers."""
        # Add vector layers to map
        myLayers = []
        myLayers.append(self.mLineLayer.id())
        myLayers.append(self.mPolygonLayer.id())
        self.mapSettings.setLayers(myLayers)
        self.mapSettings.setExtent(self.extent)

        # Set layer transparency for line layer
        self.mLineLayer.setLayerTransparency(50)

        checker = QgsMultiRenderChecker()
        checker.setControlName("expected_vector_layertransparency")
        checker.setMapSettings(self.mapSettings)
        checker.setColorTolerance(1)

        myResult = checker.runTest("vector_layertransparency", 20)
        myMessage = ('vector layer transparency failed')
        assert myResult, myMessage

        # Reset layers (0 = fully opaque default)
        self.mLineLayer.setLayerTransparency(0)

    def testRasterBlending(self):
        """Test that blend modes work for raster layers."""
        # Add raster layers to map
        myLayers = []
        myLayers.append(self.mRasterLayer1.id())
        myLayers.append(self.mRasterLayer2.id())
        self.mapSettings.setLayers(myLayers)
        self.mapSettings.setExtent(self.mRasterLayer1.extent())

        # Set blending mode for top layer
        self.mRasterLayer1.setBlendMode(QPainter.CompositionMode_Difference)

        checker = QgsMultiRenderChecker()
        checker.setControlName("expected_raster_blendmodes")
        checker.setMapSettings(self.mapSettings)
        checker.setColorTolerance(1)

        myResult = checker.runTest("raster_blendmodes", 20)
        myMessage = ('raster blending failed')
        assert myResult, myMessage

        # Reset layers
        self.mRasterLayer1.setBlendMode(QPainter.CompositionMode_SourceOver)
if __name__ == '__main__':
    # Allow running this test module directly (outside the QGIS test runner).
    unittest.main()
| wonder-sk/QGIS | tests/src/python/test_qgsblendmodes.py | Python | gpl-2.0 | 7,920 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
import re
import datetime
import Sub
class Vid:
    """A video file paired with its SRT subtitle file.

    Parses the subtitle file on construction into ``self.subs``, a dict
    mapping the SRT cue index to a ``Sub.Sub`` instance holding the cue
    text and its start/end times in seconds.
    """

    @staticmethod
    def _to_seconds(stamp):
        """Convert an SRT timestamp ('HH:MM:SS,mmm') to seconds (float)."""
        dt = datetime.datetime.strptime(stamp, '%H:%M:%S,%f')
        return (dt.hour * 3600 + dt.minute * 60 + dt.second
                + dt.microsecond / 1000000)

    def __init__(self, vid_path, sub_path):
        """Load and parse the subtitle file at *sub_path*.

        If the file cannot be opened, ``self.subs`` is left empty and a
        message is printed (best-effort behaviour, kept intentionally).
        """
        self.vid_path = vid_path
        self.sub_path = sub_path
        self.subs = {}
        try:
            with open(sub_path, 'r') as content_file:
                content = content_file.read()
            # One match per SRT cue: index line, time range line, then the
            # cue text, terminated by a blank line.
            fiter = re.finditer(
                "^\d+\s+\d+:\d+:\d+,\d+\s*-->\s*\d+:\d+:\d+,\d+\s+[\w\s\-.,;:!?'\"\\/#@$&)(\]\[]+\n\n",
                content,
                re.MULTILINE | re.IGNORECASE
            )
            for match in fiter:
                # Split into the index line, the time line, and the text.
                index_line, time_line, text = match.group(0).split('\n', 2)
                start_raw, end_raw = time_line.split(' --> ', 1)
                start_time = self._to_seconds(start_raw)
                end_time = self._to_seconds(end_raw)
                index = int(index_line)
                self.subs[index] = Sub.Sub(index, text, start_time, end_time)
        except IOError:
            print("The subtitle file does not exist")

    def __str__(self):
        """Return a debug dump: both paths plus every parsed cue."""
        r = "vid path : " + self.vid_path + " -- "
        r += "sub path : " + self.sub_path + "\n"
        for k in self.subs:
            r += str(self.subs[k])
        return r

    def search(self, s):
        """Return the indices of all cues whose text matches *s*."""
        # Sub.search returns -1 on no match (string.find-style convention).
        return [k for k in self.subs if self.subs[k].search(s) != -1]
if __name__ == "__main__":
v = Vid("", "examples/bcs-s1e1.srt")
r = v.search("law")
print(r)
| KevinHivert/SearchNPlay | Vid.py | Python | unlicense | 1,846 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon import settings
from polyaxon.proxies.schemas.gateway.forward import get_forward_cmd
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.proxies_mark
class TestGatewayForward(BaseTestCase):
    """Tests for the socat forward-proxy script built by get_forward_cmd().

    Each test mutates the global ``settings.PROXIES_CONFIG`` (reinitialized
    by the harness via ``SET_PROXIES_SETTINGS``) and checks the generated
    shell command text.
    """

    # Ask the base test harness to (re)load settings.PROXIES_CONFIG.
    SET_PROXIES_SETTINGS = True

    def test_forward_config_empty(self):
        # Nothing configured -> no forward command is generated.
        assert get_forward_cmd() is None

    def test_forward_config_wrong(self):
        # An unrecognized proxy kind yields no command even with full config.
        settings.PROXIES_CONFIG.forward_proxy_kind = "foo"
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 8080
        settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
        settings.PROXIES_CONFIG.api_port = 443
        settings.PROXIES_CONFIG.api_host = "cloud.polyaxon.com"
        assert get_forward_cmd() is None

    def test_forward_config_transparent(self):
        # Transparent kind: plain TCP relay to the proxy host/port.
        settings.PROXIES_CONFIG.forward_proxy_kind = "transparent"
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 8080
        settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
        expected = """
#!/bin/bash
set -e
set -o pipefail

socat TCP4-LISTEN:8443,reuseaddr,fork TCP:123.123.123.123:8080
"""  # noqa
        assert get_forward_cmd() == expected

    def test_forward_config_connect(self):
        # Connect kind: HTTP CONNECT tunnel through the proxy to the API.
        settings.PROXIES_CONFIG.forward_proxy_kind = "connect"
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 8080
        settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
        settings.PROXIES_CONFIG.api_port = 443
        settings.PROXIES_CONFIG.api_host = "cloud.polyaxon.com"
        expected = """
#!/bin/bash
set -e
set -o pipefail

socat TCP4-LISTEN:8443,reuseaddr,fork,bind=127.0.0.1 PROXY:123.123.123.123:cloud.polyaxon.com:443,proxyport=8080
"""  # noqa
        assert get_forward_cmd() == expected

    def test_forward_config_default(self):
        # No explicit kind set: defaults to the connect behaviour.
        settings.PROXIES_CONFIG.has_forward_proxy = True
        settings.PROXIES_CONFIG.forward_proxy_port = 8080
        settings.PROXIES_CONFIG.forward_proxy_host = "123.123.123.123"
        settings.PROXIES_CONFIG.api_port = 443
        settings.PROXIES_CONFIG.api_host = "cloud.polyaxon.com"
        expected = """
#!/bin/bash
set -e
set -o pipefail

socat TCP4-LISTEN:8443,reuseaddr,fork,bind=127.0.0.1 PROXY:123.123.123.123:cloud.polyaxon.com:443,proxyport=8080
"""  # noqa
        assert get_forward_cmd() == expected
| polyaxon/polyaxon | core/tests/test_proxies/test_gateway/test_forward.py | Python | apache-2.0 | 3,074 |
import pdb
class TimingDiagram:
def print_diagram(self, xtsm_object):
pdb.set_trace()
seq = xtsm_object.XTSM.getActiveSequence()
cMap=seq.getOwnerXTSM().getDescendentsByType("ChannelMap")[0]
#channelHeir=cMap.createTimingGroupHeirarchy()
#channelRes=cMap.findTimingGroupResolutions()
#Parser out put node. Use TimingProffer
#Control arrays hold what is actually coming out.
seq.collectTimingProffers()
edge_timings = seq.TimingProffer.data['Edge']
class Edge:
def __init__(self, timing_group, channel_number, time, value, tag,
name, initial_value, holding_value):
self.timing_group = timing_group
self.channel_number = channel_number
self.time = time
self.value = value
self.tag = tag
self.max = 0
self.min = 0
self.name = name
self.holding_value = holding_value
self.initial_value = initial_value
def is_same(self,edge):
if ((self.timing_group == edge.timing_group) and
(self.channel_number == edge.channel_number) and
(self.time == edge.time) and
(self.value == edge.value) and
(self.tag == edge.tag)):
return True
else:
return False
edges = []
longest_name = 0
for edge in edge_timings:
for channel in cMap.Channel:
tgroup = int(channel.TimingGroup.PCDATA)
tgroupIndex = int(channel.TimingGroupIndex.PCDATA)
if tgroup == int(edge[0]) and tgroupIndex == int(edge[1]):
name = channel.ChannelName.PCDATA
init_val = ''
hold_val = ''
try:
init_val = channel.InitialValue.PCDATA
except AttributeError:
init_val = 'None '
try:
hold_val = channel.HoldingValue.PCDATA
except AttributeError:
hold_val = 'None '
if len(name) > longest_name:
longest_name = len(name)
edges.append(Edge(edge[0],edge[1],edge[2],edge[3],edge[4],
name, init_val,hold_val))
#pdb.set_trace()
unique_group_channels = []
for edge in edges:
is_found = False
for ugc in unique_group_channels:
if edge.is_same(ugc):
is_found = True
if not is_found:
unique_group_channels.append(edge)
from operator import itemgetter
edge_timings_by_group = sorted(edge_timings, key=itemgetter(2))
edge_timings_by_group_list = []
for edge in edge_timings_by_group:
edge_timings_by_group_list.append(edge.tolist())
#print edge_timings
for p in edge_timings_by_group_list: print p
unique_times = []
for edge in edges:
is_found = False
for t in unique_times:
if edge.time == t.time:
is_found = True
if not is_found:
unique_times.append(edge)
#pdb.set_trace()
for ugc in unique_group_channels:
s = ugc.name.rjust(longest_name)
current_edge = edges[0]
previous_edge = edges[0]
is_first = True
for t in unique_times:
is_found = False
for edge in edges:
if edge.timing_group == ugc.timing_group and edge.channel_number == ugc.channel_number and edge.time == t.time:
is_found = True
current_edge = edge
if is_first:
s = s + '|' + str('%7s' % str(current_edge.initial_value))
is_first = False
previous_edge.value = current_edge.initial_value
if previous_edge.value == 'None ':
previous_edge.value = 0
if is_found:
if current_edge.value > previous_edge.value:
s += '^' + str('%7s' % str(current_edge.value))
else:
s += 'v' + str('%7s' % str(current_edge.value))
previous_edge = current_edge
else:
s += '|' + '.'*7
s = s + '|' + str('%7s' % str(current_edge.holding_value))
print s
s = "Time (ms)".rjust(longest_name) + '|' + str('%7s' % str("Initial"))
for t in unique_times:
s += '|' + str('%7s' % str(t.time))
s = s + '|' + str('%7s' % str("Holding"))
print s
| gemelkelabs/timing_system_software | server_py_files/utilities/timing_diagram.py | Python | mit | 5,170 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
file_path='pc03-data.txt'
with open(file_path,'r') as fp:
cont=fp.read()
rule=re.compile('[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]')
s=re.findall(rule,cont)
s1=''.join(s)+'.php'
old_url='http://www.pythonchallenge.com/pc/def/ocr.html'
new_url=old_url.replace('ocr.html',s1)
print new_url | yunyu2019/blog | python/python_challenge/03/3.py | Python | apache-2.0 | 349 |
"""
Copyright 2010-2011,2014,2018 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
import sys
MAIN_TMPL = """\
id: uhd_usrp_${sourk}
label: 'UHD: USRP ${sourk.title()}'
flags: [python, cpp, throttle]
parameters:
- id: type
label: ${direction.title()}put Type
dtype: enum
options: [fc32, sc16, item32]
option_labels: [Complex float32, Complex int16, VITA word32]
option_attributes:
type: [fc32, sc16, s32]
hide: part
- id: otw
label: Wire Format
dtype: enum
options: ['', sc16, sc12, sc8]
option_labels: [Automatic, Complex int16, Complex int12, Complex int8]
hide: ${'$'}{ 'none' if otw else 'part'}
- id: stream_args
label: Stream args
dtype: string
options: ['', peak=0.003906]
option_labels: ['', peak=0.003906]
hide: ${'$'}{ 'none' if stream_args else 'part'}
- id: stream_chans
label: Stream channels
dtype: int_vector
default: '[]'
hide: ${'$'}{ 'none' if stream_chans else 'part'}
- id: dev_addr
label: Device Address
dtype: string
default: '""'
hide: ${'$'}{ 'none' if dev_addr else 'part'}
- id: dev_args
label: Device Arguments
dtype: string
hide: ${'$'}{ 'none' if dev_args else 'part'}
- id: sync
label: Sync
dtype: enum
options: [sync, pc_clock, pc_clock_next_pps, gps_time, none]
option_labels: [Unknown PPS, PC Clock, PC Clock on Next PPS, GPS Time on Next PPS, No Sync]
hide: ${'$'}{ 'none' if sync else 'part'}
- id: start_time
label: Start Time (seconds)
dtype: real
default: -1.0
options: [-1.0]
option_labels: [Default]
hide: ${'$'}{ 'none' if start_time >= 0.0 else 'part' }
- id: clock_rate
label: Clock Rate (Hz)
dtype: real
default: 0e0
options: [0e0, 200e6, 184.32e6, 153.6e6, 125.0e6, 122.88e6, 120e6, 30.72e6]
option_labels: [Default, 200 MHz, 184.32 MHz, 153.6 MHz, 125 MHz, 122.88 MHz, 120 MHz, 30.72 MHz]
hide: ${'$'}{ 'none' if clock_rate else 'part' }
- id: num_mboards
label: Num Mboards
dtype: int
default: 1
options: [1, 2, 3, 4, 5, 6, 7, 8]
hide: part
% for m in range(max_mboards):
- id: clock_source${m}
label: 'Mb${m}: Clock Source'
dtype: string
options: ['', internal, external, mimo, gpsdo]
option_labels: [Default, Internal, External, MIMO Cable, O/B GPSDO]
hide: ${'$'}{ 'all' if not (num_mboards > ${m}) else ( 'none' if clock_source${m} else 'part')}
- id: time_source${m}
label: 'Mb${m}: Time Source'
dtype: string
options: ['', external, mimo, gpsdo]
option_labels: [Default, External, MIMO Cable, O/B GPSDO]
hide: ${'$'}{ 'all' if not (num_mboards > ${m}) else ('none' if time_source${m} else 'part')}
- id: sd_spec${m}
label: 'Mb${m}: Subdev Spec'
dtype: string
hide: ${'$'}{ 'all' if not (num_mboards > ${m}) else ('none' if sd_spec${m} else 'part')}
% endfor
- id: nchan
label: Num Channels
dtype: int
default: 1
options: [ ${", ".join([str(n) for n in range(1, max_nchan+1)])} ]
hide: part
- id: samp_rate
label: Samp rate (Sps)
dtype: real
default: samp_rate
${params}
inputs:
- domain: message
id: command
optional: true
% if sourk == 'source':
outputs:
% endif
- domain: stream
dtype: ${'$'}{type.type}
multiplicity: ${'$'}{nchan}
% if sourk == 'sink':
outputs:
- domain: message
id: async_msgs
optional: true
% endif
templates:
imports: |-
from gnuradio import uhd
import time
make: |
uhd.usrp_${sourk}(
${'%'} if clock_rate():
",".join((${'$'}{dev_addr}, ${'$'}{dev_args}, "master_clock_rate=${'$'}{clock_rate}")),
${'%'} else:
",".join((${'$'}{dev_addr}, ${'$'}{dev_args})),
${'%'} endif
uhd.stream_args(
cpu_format="${'$'}{type}",
${'%'} if otw:
otw_format="${'$'}{otw}",
${'%'} endif
${'%'} if stream_args:
args=${'$'}{stream_args},
${'%'} endif
${'%'} if eval(stream_chans):
channels=${'$'}{stream_chans},
${'%'} else:
channels=list(range(0,${'$'}{nchan})),
${'%'} endif
),
% if sourk == 'sink':
${'%'} if len_tag_name:
${'$'}{len_tag_name},
${'%'} endif
% endif
)
% for m in range(max_mboards):
${'%'} if context.get('num_mboards')() > ${m}:
########################################################################
${'%'} if context.get('clock_source${m}')():
self.${'$'}{id}.set_clock_source(${'$'}{${'clock_source' + str(m)}}, ${m})
${'%'} endif
########################################################################
${'%'} if context.get('time_source${m}')():
self.${'$'}{id}.set_time_source(${'$'}{${'time_source' + str(m)}}, ${m})
${'%'} endif
########################################################################
${'%'} if context.get('sd_spec${m}')():
self.${'$'}{id}.set_subdev_spec(${'$'}{${'sd_spec' + str(m)}}, ${m})
${'%'} endif
########################################################################
${'%'} endif
% endfor # for m in range(max_mboards)
self.${'$'}{id}.set_samp_rate(${'$'}{samp_rate})
${'%'} if sync == 'sync':
self.${'$'}{id}.set_time_unknown_pps(uhd.time_spec(0))
${'%'} elif sync == 'pc_clock':
self.${'$'}{id}.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
${'%'} elif sync == 'pc_clock_next_pps':
_last_pps_time = self.${'$'}{id}.get_time_last_pps().get_real_secs()
# Poll get_time_last_pps() every 50 ms until a change is seen
while(self.${'$'}{id}.get_time_last_pps().get_real_secs() == _last_pps_time):
time.sleep(0.05)
# Set the time to PC time on next PPS
self.${'$'}{id}.set_time_next_pps(uhd.time_spec(int(time.time()) + 1.0))
# Sleep 1 second to ensure next PPS has come
time.sleep(1)
${'%'} elif sync == 'gps_time':
# Set the time to GPS time on next PPS
# get_mboard_sensor("gps_time") returns just after the PPS edge,
# thus add one second and set the time on the next PPS
self.${'$'}{id}.set_time_next_pps(uhd.time_spec(self.${'$'}{id}.get_mboard_sensor("gps_time").to_int() + 1.0))
# Sleep 1 second to ensure next PPS has come
time.sleep(1)
${'%'} else:
# No synchronization enforced.
${'%'} endif
% for n in range(max_nchan):
${'%'} if context.get('nchan')() > ${n}:
self.${'$'}{id}.set_center_freq(${'$'}{${'center_freq' + str(n)}}, ${n})
${'%'} if context.get('ant${n}')():
self.${'$'}{id}.set_antenna(${'$'}{${'ant' + str(n)}}, ${n})
${'%'} endif
${'%'} if context.get('bw${n}')():
self.${'$'}{id}.set_bandwidth(${'$'}{${'bw' + str(n)}}, ${n})
${'%'} endif
% if sourk == 'source':
${'%'} if context.get('rx_agc${n}')() == 'Enabled':
self.${'$'}{id}.set_rx_agc(True, ${n})
${'%'} elif context.get('rx_agc${n}')() == 'Disabled':
self.${'$'}{id}.set_rx_agc(False, ${n})
${'%'} endif
${'%'} if context.get('rx_agc${n}')() != 'Enabled':
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
self.${'$'}{id}.set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
self.${'$'}{id}.set_power_reference(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} else:
self.${'$'}{id}.set_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} endif
${'%'} endif # if rx_agc${n} != 'Enabled'
${'%'} if context.get('dc_offs_enb${n}')() in ('auto', 'disabled'):
self.${'$'}{id}.set_auto_dc_offset(${'$'}{True if ${'dc_offs_enb' + str(n)} == 'auto' else False}, ${n})
${'%'} elif context.get('dc_offs_enb${n}')() == 'manual':
self.${'$'}{id}.set_dc_offset(${'$'}{${'dc_offs' + str(n)}}, ${n})
${'%'} endif
${'%'} if context.get('iq_imbal_enb${n}')() in ('auto', 'disabled'):
self.${'$'}{id}.set_auto_iq_balance(${'$'}{True if ${'iq_imbal_enb' + str(n)} == 'auto' else False}, ${n})
${'%'} elif context.get('iq_imbal_enb${n}')() == 'manual':
self.${'$'}{id}.set_iq_balance(${'$'}{${'iq_imbal' + str(n)}}, ${n})
${'%'} endif
% else:
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
self.${'$'}{id}.set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
self.${'$'}{id}.set_power_reference(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} else:
self.${'$'}{id}.set_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} endif
% endif # if sourk == 'source'
${'%'} if context.get('show_lo_controls')():
self.${'$'}{id}.set_lo_source(${'$'}{${'lo_source' + str(n)}}, uhd.ALL_LOS, ${n})
self.${'$'}{id}.set_lo_export_enabled(${'$'}{${'lo_export' + str(n)}}, uhd.ALL_LOS, ${n})
${'%'} endif
${'%'} endif # nchan > n
% endfor # for n in range(max_nchan)
${'%'} if start_time() >= 0.0:
self.${'$'}{id}.set_start_time(uhd.time_spec(${'$'}{start_time}))
${'%'} endif
callbacks:
- set_samp_rate(${'$'}{samp_rate})
% for n in range(max_nchan):
- set_center_freq(${'$'}{${'center_freq' + str(n)}}, ${n})
% if sourk == 'source':
- ${'$'}{'set_rx_agc(True, ${n})' if context.get('rx_agc${n}')() == 'Enabled' else ''}
- ${'$'}{'set_rx_agc(False, ${n})' if context.get('rx_agc${n}')() == 'Disabled' else ''}
- |
${'%'} if context.get('rx_agc${n}')() != 'Enabled':
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
self.${'$'}{id}.set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
self.${'$'}{id}.set_power_reference(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} else:
self.${'$'}{id}.set_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} endif
${'%'} endif
% else:
- |
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
self.${'$'}{id}.set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
self.${'$'}{id}.set_power_reference(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} else:
self.${'$'}{id}.set_gain(${'$'}{${'gain' + str(n)}}, ${n})
${'%'} endif
% endif
- ${'$'}{'set_lo_source(' + lo_source${n} + ', uhd.ALL_LOS, ${n})' if show_lo_controls else ''}
- ${'$'}{'set_lo_export_enabled(' + lo_export${n} + ', uhd.ALL_LOS, ${n})' if show_lo_controls else ''}
- set_antenna(${'$'}{${'ant' + str(n)}}, ${n})
- set_bandwidth(${'$'}{${'bw' + str(n)}}, ${n})
% endfor
cpp_templates:
includes: [ '#include <gnuradio/uhd/usrp_${sourk}.h>' ]
declarations: 'gr::uhd::usrp_${sourk}::sptr ${'$'}{id};'
make: |
this->${'$'}{id} = gr::uhd::usrp_${sourk}::make(
::uhd::device_addr_t("${'$'}{",".join((str(dev_addr).strip('"\\''), str(dev_args).strip('"\\''))) if len(str(dev_args).strip('"\\'')) > 0 else dev_addr.strip('"\\'')}"),
::uhd::stream_args_t("${'$'}{type}", "${'$'}{otw}"));
% for m in range(max_mboards):
${'%'} if context.get('num_mboards')() > ${m}:
${'%'} if context.get('sd_spec${m}')():
this->${'$'}{id}->set_subdev_spec(${'$'}{${'sd_spec' + str(m)}}, ${m});
${'%'} endif
${'%'} if context.get('time_source${m}')():
this->${'$'}{id}->set_time_source(${'$'}{${'time_source' + str(m)}}, ${m});
${'%'} endif
${'%'} if context.get('clock_source${m}')():
this->${'$'}{id}->set_clock_source("${'$'}{${'clock_source' + str(m)}.strip('\\'')}", ${m});
${'%'} endif
${'%'} endif
% endfor
this->${'$'}{id}->set_samp_rate(${'$'}{samp_rate});
${'%'} if sync == 'sync':
this->${'$'}{id}->set_time_unknown_pps(::uhd::time_spec_t());
${'%'} elif sync == 'pc_clock':
this->${'$'}{id}->set_time_now(::uhd::time_spec_t(time(NULL)), ::uhd::usrp::multi_usrp::ALL_MBOARDS);
${'%'} else:
// No synchronization enforced.
${'%'} endif
% for n in range(max_nchan):
${'%'} if context.get('nchan')() > ${n}:
this->${'$'}{id}->set_center_freq(${'$'}{${'center_freq' + str(n)}}, ${n});
% if sourk == 'source':
${'%'} if context.get('rx_agc${n}')() == 'Enabled':
this->${'$'}{id}->set_rx_agc(True, ${n});
${'%'} elif context.get('rx_agc${n}')() == 'Disabled':
this->${'$'}{id}->set_rx_agc(False, ${n});
${'%'} endif
${'%'} if context.get('rx_agc${n}')() != 'Enabled':
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
this->${'$'}{id}->set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
this->${'$'}{id}->set_power_reference(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} else:
this->${'$'}{id}->set_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} endif
${'%'} endif
% else:
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
this->${'$'}{id}->set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
this->${'$'}{id}->set_power_reference(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} else:
this->${'$'}{id}->set_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} endif
% endif
${'%'} if context.get('ant${n}')():
this->${'$'}{id}->set_antenna(${'$'}{${'ant' + str(n)}}, ${n});
${'%'} endif
${'%'} if context.get('bw${n}')():
this->${'$'}{id}->set_bandwidth(${'$'}{${'bw' + str(n)}}, ${n});
${'%'} endif
${'%'} if context.get('show_lo_controls')():
this->${'$'}{id}->set_lo_source(${'$'}{${'lo_source' + str(n)}}, ::uhd::usrp::multi_usrp::ALL_LOS, ${n});
this->${'$'}{id}->set_lo_export_enabled(${'$'}{${'lo_export' + str(n)}}, ::uhd::usrp::multi_usrp::ALL_LOS, ${n});
${'%'} endif
${'%'} endif
% endfor
${'%'} if clock_rate():
this->${'$'}{id}->set_clock_rate(${'$'}{clock_rate}, ::uhd::usrp::multi_usrp::ALL_MBOARDS);
${'%'} endif
${'%'} if start_time() >= 0.0:
this->${'$'}{id}->set_start_time(::uhd::time_spec_t(${'$'}{float(start_time)}));
${'%'} endif
link: ['gnuradio-uhd uhd']
callbacks:
- set_samp_rate(${'$'}{samp_rate})
% for n in range(max_nchan):
- set_center_freq(${'$'}{${'center_freq' + str(n)}}, ${n})
% if sourk == 'source':
- ${'$'}{'set_rx_agc(True, ${n})' if context.get('rx_agc${n}')() == 'Enabled' else ''}
- ${'$'}{'set_rx_agc(False, ${n})' if context.get('rx_agc${n}')() == 'Disabled' else ''}
- |
${'%'} if context.get('rx_agc${n}')() != 'Enabled':
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
this->${'$'}{id}->set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
this->${'$'}{id}->set_power_reference(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} else:
this->${'$'}{id}->set_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} endif
${'%'} endif
% else:
- |
${'%'} if context.get('gain_type' + '${n}')() == 'normalized':
this->${'$'}{id}->set_normalized_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} elif context.get('gain_type' + '${n}')() == 'power':
this->${'$'}{id}->set_power_reference(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} else:
this->${'$'}{id}->set_gain(${'$'}{${'gain' + str(n)}}, ${n});
${'%'} endif
% endif
- ${'$'}{'set_lo_source(' + lo_source${n} + ', ::uhd::usrp::multi_usrp::ALL_LOS, ${n})' if show_lo_controls else ''}
- ${'$'}{'set_lo_export_enabled(' + lo_export${n} + ', ::uhd::usrp::multi_usrp::ALL_LOS, ${n})' if show_lo_controls else ''}
- set_antenna(${'$'}{${'ant' + str(n)}}, ${n})
- set_bandwidth(${'$'}{${'bw' + str(n)}}, ${n})
% endfor
documentation: |-
The UHD USRP ${sourk.title()} Block:
Device Address:
The device address is a delimited string used to locate UHD devices on your system. \\
If left blank, the first UHD device found will be used. \\
Use the device address to specify a specific device or list of devices.
USRP1 Example: serial=12345678
USRP2 Example: addr=192.168.10.2
USRP2 Example: addr0=192.168.10.2, addr1=192.168.10.3
${direction.title()} Type:
This parameter controls the data type of the stream in gnuradio.
Wire Format:
This parameter controls the form of the data over the bus/network. \
Complex bytes may be used to trade off precision for bandwidth. \
Not all formats are supported on all devices.
Stream Args:
Optional arguments to be passed in the UHD streamer object. \
Streamer args is a list of key/value pairs; usage is determined by the implementation.
Ex: the scalar key affects the scaling between 16 and 8 bit integers in sc8 wire format.
Num Motherboards:
Selects the number of USRP motherboards in this device configuration.
Reference Source:
Where the motherboard should sync its time and clock references.
If source and sink blocks reference the same device,
it is only necessary to set the reference source on one of the blocks.
Subdevice specification:
Each motherboard should have its own subdevice specification \\
and all subdevice specifications should be the same length. \\
Select the subdevice or subdevices for each channel using a markup string. \\
The markup string consists of a list of dboard_slot:subdev_name pairs (one pair per channel). \\
If left blank, the UHD will try to select the first subdevice on your system. \\
See the application notes for further details.
Single channel example: :AB
Dual channel example: :A :B
Num Channels:
Selects the total number of channels in this multi-USRP configuration.
Ex: 4 motherboards with 2 channels per board = 8 channels total
Sample rate:
The sample rate is the number of samples per second input by this block. \\
The UHD device driver will try its best to match the requested sample rate. \\
If the requested rate is not possible, the UHD block will print an error at runtime.
Center frequency:
The center frequency is the overall frequency of the RF chain. \\
For greater control of how the UHD tunes elements in the RF chain, \\
pass a tune_request object rather than a simple target frequency.
Tuning with an LO offset example: uhd.tune_request(freq, lo_off)
Tuning without DSP: uhd.tune_request(target_freq, dsp_freq=0, \\
dsp_freq_policy=uhd.tune_request.POLICY_MANUAL)
Antenna:
For subdevices with only one antenna, this may be left blank. \\
Otherwise, the user should specify one of the possible antenna choices. \\
See the daughterboard application notes for the possible antenna choices.
Bandwidth:
To use the default bandwidth filter setting, this should be zero. \\
Only certain subdevices have configurable bandwidth filters. \\
See the daughterboard application notes for possible configurations.
Length tag key (Sink only):
When a nonempty string is given, the USRP sink will look for length tags \\
to determine transmit burst lengths.
See the UHD manual for more detailed documentation:
http://uhd.ettus.com
file_format: 1
"""
PARAMS_TMPL = """
- id: center_freq${n}
label: 'Ch${n}: Center Freq (Hz)'
category: RF Options
dtype: raw
default: '0'
hide: ${'$'}{ 'none' if (nchan > ${n}) else 'all' }
% if sourk == 'source':
- id: rx_agc${n}
label: 'Ch${n}: AGC'
category: RF Options
dtype: string
default: 'Default'
options: ['Default', 'Disabled', 'Enabled']
option_labels: [Default, Disabled, Enabled]
hide: ${'$'}{ 'none' if (nchan > ${n}) else 'all' }
% endif
- id: gain${n}
label: 'Ch${n}: Gain Value'
category: RF Options
dtype: float
default: '0'
% if sourk == 'source':
hide: ${'$'}{ 'none' if nchan > ${n} and rx_agc${n} != 'Enabled' else 'all' }
% else:
hide: ${'$'}{ 'none' if nchan > ${n} else 'all' }
% endif
- id: gain_type${n}
label: 'Ch${n}: Gain Type'
category: RF Options
dtype: enum
options: [default, normalized, power]
option_labels: [Absolute (dB), Normalized, Absolute Power (dBm)]
% if sourk == 'source':
hide: ${'$'}{ 'all' if nchan <= ${n} or rx_agc${n} == 'Enabled' else ('part' if (eval('gain_type' + str(${n})) == 'default') else 'none')}
% else:
hide: ${'$'}{ 'all' if nchan <= ${n} else ('part' if (eval('gain_type' + str(${n})) == 'default') else 'none')}
% endif
- id: ant${n}
label: 'Ch${n}: Antenna'
category: RF Options
dtype: string
% if sourk == 'source':
options: ['"TX/RX"', '"RX2"', '"RX1"']
option_labels: [TX/RX, RX2, RX1]
default: '"RX2"'
% else:
options: ['"TX/RX"']
option_labels: [TX/RX]
% endif
hide: ${'$'}{ 'all' if not nchan > ${n} else ('none' if eval('ant' + str(${n})) else 'part')}
- id: bw${n}
label: 'Ch${n}: Bandwidth (Hz)'
category: RF Options
dtype: real
default: '0'
hide: ${'$'}{ 'all' if not nchan > ${n} else ('none' if eval('bw' + str(${n})) else 'part')}
- id: lo_source${n}
label: 'Ch${n}: LO Source'
category: RF Options
dtype: string
default: internal
options: [internal, external, companion]
hide: ${'$'}{ 'all' if not nchan > ${n} else ('none' if show_lo_controls else 'all')}
- id: lo_export${n}
label: 'Ch${n}: LO Export'
category: RF Options
dtype: bool
default: 'False'
options: ['True', 'False']
hide: ${'$'}{ 'all' if not nchan > ${n} else ('none' if show_lo_controls else 'all')}
% if sourk == 'source':
- id: dc_offs_enb${n}
label: 'Ch${n}: Enable DC Offset Correction'
category: FE Corrections
dtype: enum
options: [default, auto, disabled, manual]
option_labels: [Default, Automatic, Disabled, Manual]
hide: ${'$'}{ 'all' if not nchan > ${n} else 'part'}
- id: dc_offs${n}
label: 'Ch${n}: DC Offset Correction Value'
category: FE Corrections
dtype: complex
default: 0+0j
hide: ${'$'}{ 'all' if not dc_offs_enb${n} == 'manual' else 'part'}
- id: iq_imbal_enb${n}
label: 'Ch${n}: Enable IQ Imbalance Correction'
category: FE Corrections
dtype: enum
options: [default, auto, disabled, manual]
option_labels: [Default, Automatic, Disabled, Manual]
hide: ${'$'}{ 'all' if not nchan > ${n} else 'part'}
- id: iq_imbal${n}
label: 'Ch${n}: IQ imbalance Correction Value'
category: FE Corrections
dtype: complex
default: 0+0j
hide: ${'$'}{ 'all' if not iq_imbal_enb${n} == 'manual' else 'part'}
% endif
"""
SHOW_LO_CONTROLS_PARAM = """
- id: show_lo_controls
label: Show LO Controls
category: Advanced
dtype: bool
default: 'False'
hide: part
"""
TSBTAG_PARAM = """
- id: len_tag_name
label: TSB tag name
dtype: string
default: '""'
hide: ${ 'none' if len(str(len_tag_name)) else 'part'}
"""
TSBTAG_ARG = """
${'%'} if len_tag_name():
${'$'}{len_tag_name},
${'%'} endif
"""
def parse_tmpl(_tmpl, **kwargs):
    """Render the Mako template string *_tmpl* with *kwargs* and return it as str."""
    # Imported lazily so merely importing this module does not require mako.
    from mako.template import Template
    return str(Template(_tmpl).render(**kwargs))
# Generator limits: block definitions are emitted for up to this many
# motherboards, with four channels per motherboard.
MAX_NUM_MBOARDS = 8
MAX_NUM_CHANNELS = MAX_NUM_MBOARDS * 4
if __name__ == '__main__':
    # For each requested output file, decide the block direction from its
    # name and render the full YAML block definition.
    for fname in sys.argv[1:]:
        if fname.endswith('source.block.yml'):
            sourk = 'source'
            direction = 'out'
        elif fname.endswith('sink.block.yml'):
            sourk = 'sink'
            direction = 'in'
        else:
            # Bug fix: the original format string was 'is % a source or
            # sink?' -- the missing 's' conversion meant the offending file
            # name was never interpolated into the error message.
            raise Exception('is %s a source or sink?' % fname)
        # One parameter section per possible channel, plus the global toggles.
        params = ''.join([
            parse_tmpl(PARAMS_TMPL, n=n, sourk=sourk)
            for n in range(MAX_NUM_CHANNELS)
        ])
        params += SHOW_LO_CONTROLS_PARAM
        if sourk == 'sink':
            params += TSBTAG_PARAM
            lentag_arg = TSBTAG_ARG
        else:
            lentag_arg = ''
        # Use a context manager so the handle is always closed (the original
        # leaked the file object returned by open()).
        with open(fname, 'w') as out_file:
            out_file.write(
                parse_tmpl(
                    MAIN_TMPL,
                    lentag_arg=lentag_arg,
                    max_nchan=MAX_NUM_CHANNELS,
                    max_mboards=MAX_NUM_MBOARDS,
                    params=params,
                    sourk=sourk,
                    direction=direction,
                )
            )
| dl1ksv/gnuradio | gr-uhd/grc/gen_uhd_usrp_blocks.py | Python | gpl-3.0 | 25,116 |
import pytest
import expipe
import subprocess
import click
from click.testing import CliRunner
import quantities as pq
import os.path as op
from expipe_plugin_cinpla.intan import IntanPlugin
from expipe_plugin_cinpla.electrical_stimulation import ElectricalStimulationPlugin
from expipe_plugin_cinpla.main import CinplaPlugin
# Point expipe at its testing backend so these tests never touch real data.
expipe.ensure_testing()


@click.group()
@click.pass_context
def cli(ctx):
    # Root command group; the plugins below register their subcommands on it.
    pass

# Attach the CLI commands under test to the root group.
IntanPlugin().attach_to_cli(cli)
ElectricalStimulationPlugin().attach_to_cli(cli)
CinplaPlugin().attach_to_cli(cli)
def run_command(command_list, inp=None):
    """Invoke *command_list* on the test CLI, feeding *inp* as stdin.

    Prints the captured output and re-raises the underlying exception
    when the command did not exit cleanly.
    """
    outcome = CliRunner().invoke(cli, command_list, input=inp)
    if outcome.exit_code == 0:
        return
    print(outcome.output)
    raise outcome.exception
def test_intan():  # module_teardown_setup_project_setup
    """Register, process and analyse a single Intan .rhs recording."""
    currdir = op.abspath(op.dirname(__file__))
    intan_path = op.join(currdir, 'test_data', 'intan',
                         'test-rat_2017-06-23_11-15-46_1',
                         'test_170623_111545_stim.rhs')
    action_id = 'test-rat-230617-01'
    data_path = op.join(expipe.settings['data_path'],
                        pytest.USER_PAR.project_id,
                        action_id)
    # Start from a clean slate if a previous run left data behind.
    if op.exists(data_path):
        import shutil
        shutil.rmtree(data_path)
    run_command(['register-intan', intan_path, '--no-move'], inp='y')
    run_command(['process-intan', action_id])
    # Bug fix: '--spike-stat' was passed twice in the original invocation.
    run_command(['analyse', action_id, '--spike-stat', '--psd', '--tfr'])
def test_intan_ephys():  # module_teardown_setup_project_setup
    # Same flow as the plain intan test above, but registers the whole
    # recording directory through the combined intan+ephys commands.
    currdir = op.abspath(op.dirname(__file__))
    intan_ephys_path = op.join(currdir, 'test_data', 'intan',
                               'test-rat_2017-06-23_11-15-46_1')
    action_id = 'test-rat-230617-01'
    data_path = op.join(expipe.settings['data_path'],
                        pytest.USER_PAR.project_id,
                        action_id)
    # Remove leftovers from earlier runs so registration starts clean.
    if op.exists(data_path):
        import shutil
        shutil.rmtree(data_path)
    run_command(['register-intan-ephys', intan_ephys_path, '--no-move'], inp='y')
    run_command(['process-intan-ephys', action_id])
    run_command(['analyse', action_id, '--all'])
| CINPLA/expipe-dev | expipe-plugin-cinpla/tests/test_intan.py | Python | gpl-3.0 | 2,175 |
"""
Backtracking - Sudoku Solver (hard)
Description:
Write a program to solve a Sudoku puzzle by filling the empty cells.
A sudoku solution must satisfy all of the following rules:
1. Each of the digits 1-9 must occur exactly once in each row.
2. Each of the digits 1-9 must occur exactly once in each column.
3. Each of the digits 1-9 must occur exactly once in each of the 9 3x3 sub-boxes of the grid.
The '.' character indicates empty cells.
Constraints:
1. board.length == 9
2. board[i].length == 9
3. board[i][j] is a digit or '.'.
4. It is guaranteed that the input board has only one solution.
Example:
Input: board =
[["5","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]]
Output:
[["5","3","4","6","7","8","9","1","2"],
["6","7","2","1","9","5","3","4","8"],
["1","9","8","3","4","2","5","6","7"],
["8","5","9","7","6","1","4","2","3"],
["4","2","6","8","5","3","7","9","1"],
["7","1","3","9","2","4","8","5","6"],
["9","6","1","5","3","7","2","8","4"],
["2","8","7","4","1","9","6","3","5"],
["3","4","5","2","8","6","1","7","9"]]
LeetCode: https://leetcode-cn.com/problems/sudoku-solver/
"""
def sudoku_solver(board: list) -> list:
    """Solve the 9x9 Sudoku *board* in place using backtracking.

    Empty cells are marked '.'; filled cells hold the digit characters
    '1'-'9'.  Per the problem constraints the board is assumed to have
    exactly one solution.  Returns *board* for convenience.
    """
    rows = [set() for _ in range(9)]   # digits already used in each row
    cols = [set() for _ in range(9)]   # digits already used in each column
    boxes = [set() for _ in range(9)]  # digits already used in each 3x3 box
    empties = []                       # coordinates still to be filled

    for r in range(9):
        for c in range(9):
            digit = board[r][c]
            if digit == '.':
                empties.append((r, c))
            else:
                rows[r].add(digit)
                cols[c].add(digit)
                boxes[(r // 3) * 3 + c // 3].add(digit)

    def _backtrack(i: int) -> bool:
        # Try to fill empties[i:]; True when every remaining cell is placed.
        if i == len(empties):
            return True
        r, c = empties[i]
        b = (r // 3) * 3 + c // 3
        for digit in '123456789':
            if digit in rows[r] or digit in cols[c] or digit in boxes[b]:
                continue
            board[r][c] = digit
            rows[r].add(digit)
            cols[c].add(digit)
            boxes[b].add(digit)
            if _backtrack(i + 1):
                return True
            # Undo the tentative placement and try the next digit.
            board[r][c] = '.'
            rows[r].remove(digit)
            cols[c].remove(digit)
            boxes[b].remove(digit)
        return False

    _backtrack(0)
    return board
if __name__ == "__main__":
    # Example puzzle from the problem statement ('.' marks an empty cell).
    board = [["5","3",".",".","7",".",".",".","."],
             ["6",".",".","1","9","5",".",".","."],
             [".","9","8",".",".",".",".","6","."],
             ["8",".",".",".","6",".",".",".","3"],
             ["4",".",".","8",".","3",".",".","1"],
             ["7",".",".",".","2",".",".",".","6"],
             [".","6",".",".",".",".","2","8","."],
             [".",".",".","4","1","9",".",".","5"],
print(sudoku_solver(board)) | dreamibor/Algorithms-and-Data-Structures-Using-Python | practice/implementation/backtracking/sudoku_solver.py | Python | gpl-3.0 | 2,004 |
#coding=utf-8
from flask.ext.login import current_user
from permission import Permission
from .. import db
from ..models import Ticket, Record
# Validates a date of the form YYYY-MM-DD, accounting for per-month day
# counts and Gregorian leap years (years 1800-9999).
re_date = ('(^((1[8-9]\d{2})|([2-9]\d{3}))(-)(10|12|0?[13578])(-)(3[01]|[12][0-9]|0?[1-9])$)|'
           '(^((1[8-9]\d{2})|([2-9]\d{3}))(-)(11|0?[469])(-)(30|[12][0-9]|0?[1-9])$)|(^((1[8-9]\d{2})|'
           '([2-9]\d{3}))(-)(0?2)(-)(2[0-8]|1[0-9]|0?[1-9])$)|(^([2468][048]00)(-)(0?2)(-)(29)$)|'
           '(^([3579][26]00)(-)(0?2)(-)(29)$)|(^([1][89][0][48])(-)(0?2)(-)(29)$)|(^([2-9][0-9][0][48])(-)(0?2)(-)(29)$)|'
           '(^([1][89][2468][048])(-)(0?2)(-)(29)$)|(^([2-9][0-9][2468][048])(-)(0?2)(-)(29)$)|'
           '(^([1][89][13579][26])(-)(0?2)(-)(29)$)|(^([2-9][0-9][13579][26])(-)(0?2)(-)(29)$)')
# Validates a dotted-quad IPv4 address (each octet 0-255).
re_ip = '^((25[0-5]|2[0-4]\d|[01]?\d\d?)($|(?!\.$)\.)){4}$'
# Validates a mobile phone number: 11 digits starting with 1.
re_cellphone = '^1\d{10}$'
# Validates an e-mail address.
re_email = '^(\w)+(\.\w+)*@(\w)+((\.\w+)+)$'
# Validates a semicolon-terminated list of numeric graph ids, e.g. "1;2;3;".
re_graph_id = '(^[\d;]*)\d;$'
# 记录操作
def record_sql(status, table, table_id, item, value):
'''记录cmdb操作记录'''
record = Record(
username=current_user.alias,
status=status,
table=table,
table_id=table_id,
item=item,
value=value,
)
db.session.add(record)
db.session.commit()
# 选择框初始化
def init_checkbox(thead, checkbox):
'''处理需要隐藏的字段
checkbox 是一个列表,对应需要隐藏的字段的索引
'''
if checkbox:
if isinstance(checkbox, unicode):
checkbox = eval(checkbox)
for th in thead:
if str(th[0]) in checkbox:
th [3] = True
else:
th[3] = False
else:
for box in range(0, len(thead)):
thead[box][3] = False
return thead
# 获取工单信息
def ticket_status():
status = {}
role = current_user.role
if role == "QUERY":
all_ticket = Ticket.query.filter_by(sales=current_user.alias)
elif getattr(Permission, role) >= 29:
all_ticket = Ticket.query
else:
all_ticket = False
if all_ticket:
status["audit"] = all_ticket.filter_by(status="audit").count()
status["approved"] = all_ticket.filter_by(status="approved").count()
status["processing"] = all_ticket.filter_by(status="processing").count()
return status
| Leon109/IDCMS-Web | web/app/utils/utils.py | Python | apache-2.0 | 2,410 |
from django.http import Http404
from django.views.generic import ListView, DetailView, CreateView
from .models import Article
class ArticlesView(ListView):
    """List view showing only published articles."""
    model = Article
    template_name = 'articles.html'
    context_object_name = 'articles'
    # Drafts never appear in the public listing.
    queryset = Article.objects.filter(published=True)
class ArticleDetailView(DetailView):
    """Detail view for a single article.

    Unpublished articles are visible only to their owner; everyone else
    gets a 404, indistinguishable from a missing article.
    """
    model = Article
    template_name = 'article.html'

    def get_object(self, queryset=None):
        article = super(ArticleDetailView, self).get_object(queryset)
        # Hide drafts from everyone but the author.
        if not article.published and article.owner != self.request.user:
            raise Http404()
        return article
class ArticleCreateView(CreateView):
    """Form view for creating an article; only headline and description are editable."""
    model = Article
    fields = ('headline', 'description')
    template_name = 'article_form.html'
| wahuneke/django-skd-smoke | example_project/articles/views.py | Python | mit | 778 |
from redbot.core import commands
from redbot.core.i18n import Translator
# Public API of this module.
__all__ = ("trivia_stop_check",)
# Translator for user-facing strings in the Trivia cog.
_ = Translator("Trivia", __file__)
def trivia_stop_check():
    """Command check allowing only privileged members to stop a trivia session.

    Authorised: the bot owner, mods, admins, the guild owner, and the member
    who started the session.  Raises CheckFailure when no session is running
    in the channel.
    """
    async def predicate(ctx: commands.GuildContext) -> bool:
        session = ctx.cog._get_trivia_session(ctx.channel)
        if session is None:
            raise commands.CheckFailure(_("There is no ongoing trivia session in this channel."))
        author = ctx.author
        auth_checks = (
            await ctx.bot.is_owner(author),
            await ctx.bot.is_mod(author),
            await ctx.bot.is_admin(author),
            author == ctx.guild.owner,
            author == session.ctx.author,
        )
        # Any single qualifying role is sufficient.
        return any(auth_checks)

    return commands.permissions_check(predicate)
| palmtree5/Red-DiscordBot | redbot/cogs/trivia/checks.py | Python | gpl-3.0 | 772 |
# Tells Django which AppConfig class to use for the 'ibms' app.
default_app_config = 'ibms.apps.IBMSConfig'
| parksandwildlife/ibms | ibms_project/ibms/__init__.py | Python | apache-2.0 | 44 |
import serial
import smtplib
import yaml
# Runtime configuration (credentials, recipient, serial port) lives in
# config.yml next to this script.
config = yaml.safe_load(open('config.yml'))

to = config['receiver_email']
gmail_user = config['gmail_user']
gmail_pass = config['gmail_pass']
subject = 'Water your plants'
text = 'Please water your plants. The soil is dry.'

# The moisture sensor streams integer readings over this serial port.
ser = serial.Serial(config['serial_port'], 9600)
def send_email():
    # Send the "water your plants" notification through Gmail over SSL,
    # using the credentials and recipient loaded from config.yml.
    smtpserver = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    smtpserver.login(gmail_user, gmail_pass)
    header = 'To:' + to + '\n' + 'From: ' + gmail_user
    header = header + '\n' + 'Subject:' + subject + '\n'
    print header
    msg = header + '\n' + text + ' \n\n'
    smtpserver.sendmail(gmail_user, to, msg)
    smtpserver.close()
# Poll the sensor forever; a reading below 500 means the soil is dry.
# NOTE(review): the trailing `value = 1023` after this loop body is dead --
# `value` is overwritten at the top of every iteration.
while True:
    # Each serial line carries one integer moisture reading.
    value = int(ser.readline().rstrip('\r\n'))
    print(value)
    print repr(value)
    if value < 500:
        print('Sent email')
        send_email()
value = 1023 | florinutz/arduweather | mail.py | Python | gpl-3.0 | 865 |
from .base import HeartbeatThread, Order, RabbitMQAdapter, Shove, ShoveThread
# Package version string.
__version__ = '0.1.6'
| mozilla/shove | shove/__init__.py | Python | mpl-2.0 | 102 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from recipe_engine import recipe_api
class GSUtilApi(recipe_api.RecipeApi):
@property
def gsutil_py_path(self):
return self.package_repo_resource('gsutil.py')
def __call__(self, cmd, name=None, use_retry_wrapper=True, version=None,
parallel_upload=False, multithreaded=False, **kwargs):
"""A step to run arbitrary gsutil commands.
Note that this assumes that gsutil authentication environment variables
(AWS_CREDENTIAL_FILE and BOTO_CONFIG) are already set, though if you want to
set them to something else you can always do so using the env={} kwarg.
Note also that gsutil does its own wildcard processing, so wildcards are
valid in file-like portions of the cmd. See 'gsutil help wildcards'.
Arguments:
cmd: list of (string) arguments to pass to gsutil.
Include gsutil-level options first (see 'gsutil help options').
name: the (string) name of the step to use.
Defaults to the first non-flag token in the cmd.
"""
if not name:
name = (t for t in cmd if not t.startswith('-')).next()
full_name = 'gsutil ' + name
gsutil_path = self.gsutil_py_path
cmd_prefix = []
if use_retry_wrapper:
# We pass the real gsutil_path to the wrapper so it doesn't have to do
# brittle path logic.
cmd_prefix = ['--', gsutil_path]
gsutil_path = self.resource('gsutil_smart_retry.py')
if version:
cmd_prefix.extend(['--force-version', version])
if parallel_upload:
cmd_prefix.extend([
'-o',
'GSUtil:parallel_composite_upload_threshold=50M'
])
if multithreaded:
cmd_prefix.extend(['-m'])
if use_retry_wrapper:
# The -- argument for the wrapped gsutil.py is escaped as ---- as python
# 2.7.3 removes all occurrences of --, not only the first. It is unescaped
# in gsutil_wrapper.py and then passed as -- to gsutil.py.
# Note, that 2.7.6 doesn't have this problem, but it doesn't hurt.
cmd_prefix.append('----')
else:
cmd_prefix.append('--')
return self.m.python(full_name, gsutil_path, cmd_prefix + cmd,
infra_step=True, **kwargs)
def upload(self, source, bucket, dest, args=None, link_name='gsutil.upload',
metadata=None, unauthenticated_url=False, **kwargs):
args = [] if args is None else args[:]
# Note that metadata arguments have to be passed before the command cp.
metadata_args = self._generate_metadata_args(metadata)
full_dest = 'gs://%s/%s' % (bucket, dest)
cmd = metadata_args + ['cp'] + args + [source, full_dest]
name = kwargs.pop('name', 'upload')
result = self(cmd, name, **kwargs)
if link_name:
result.presentation.links[link_name] = self._http_url(
bucket, dest, unauthenticated_url=unauthenticated_url)
return result
def download(self, bucket, source, dest, args=None, **kwargs):
args = [] if args is None else args[:]
full_source = 'gs://%s/%s' % (bucket, source)
cmd = ['cp'] + args + [full_source, dest]
name = kwargs.pop('name', 'download')
return self(cmd, name, **kwargs)
def download_url(self, url, dest, args=None, **kwargs):
args = args or []
url = self._normalize_url(url)
cmd = ['cp'] + args + [url, dest]
name = kwargs.pop('name', 'download_url')
self(cmd, name, **kwargs)
def cat(self, url, args=None, **kwargs):
args = args or []
url = self._normalize_url(url)
cmd = ['cat'] + args + [url]
name = kwargs.pop('name', 'cat')
return self(cmd, name, **kwargs)
def copy(self, source_bucket, source, dest_bucket, dest, args=None,
link_name='gsutil.copy', metadata=None, unauthenticated_url=False,
**kwargs):
args = args or []
args += self._generate_metadata_args(metadata)
full_source = 'gs://%s/%s' % (source_bucket, source)
full_dest = 'gs://%s/%s' % (dest_bucket, dest)
cmd = ['cp'] + args + [full_source, full_dest]
name = kwargs.pop('name', 'copy')
result = self(cmd, name, **kwargs)
if link_name:
result.presentation.links[link_name] = self._http_url(
dest_bucket, dest, unauthenticated_url=unauthenticated_url)
def list(self, url, args=None, **kwargs):
args = args or []
url = self._normalize_url(url)
cmd = ['ls'] + args + [url]
name = kwargs.pop('name', 'list')
return self(cmd, name, **kwargs)
def signurl(self, private_key_file, bucket, dest, args=None, **kwargs):
args = args or []
full_source = 'gs://%s/%s' % (bucket, dest)
cmd = ['signurl'] + args + [private_key_file, full_source]
name = kwargs.pop('name', 'signurl')
return self(cmd, name, **kwargs)
def remove_url(self, url, args=None, **kwargs):
args = args or []
url = self._normalize_url(url)
cmd = ['rm'] + args + [url]
name = kwargs.pop('name', 'remove')
self(cmd, name, **kwargs)
def _generate_metadata_args(self, metadata):
result = []
if metadata:
for k, v in sorted(metadata.iteritems(), key=lambda (k, _): k):
field = self._get_metadata_field(k)
param = (field) if v is None else ('%s:%s' % (field, v))
result += ['-h', param]
return result
def _normalize_url(self, url):
gs_prefix = 'gs://'
# Defines the regex that matches a normalized URL.
for prefix in (
gs_prefix,
'https://storage.cloud.google.com/',
'https://storage.googleapis.com/',
):
if url.startswith(prefix):
return gs_prefix + url[len(prefix):]
raise AssertionError("%s cannot be normalized" % url)
@classmethod
def _http_url(cls, bucket, dest, unauthenticated_url=False):
if unauthenticated_url:
base = 'https://storage.googleapis.com/%s/%s'
else:
base = 'https://storage.cloud.google.com/%s/%s'
return base % (bucket, dest)
@staticmethod
def _get_metadata_field(name, provider_prefix=None):
"""Returns: (str) the metadata field to use with Google Storage
The Google Storage specification for metadata can be found at:
https://developers.google.com/storage/docs/gsutil/addlhelp/WorkingWithObjectMetadata
"""
# Already contains custom provider prefix
if name.lower().startswith('x-'):
return name
# See if it's innately supported by Google Storage
if name in (
'Cache-Control',
'Content-Disposition',
'Content-Encoding',
'Content-Language',
'Content-MD5',
'Content-Type',
):
return name
# Add provider prefix
if not provider_prefix:
provider_prefix = 'x-goog-meta'
return '%s-%s' % (provider_prefix, name)
| Shouqun/node-gn | tools/depot_tools/recipes/recipe_modules/gsutil/api.py | Python | mit | 6,855 |
import functools
from corehq.util.es import elasticsearch as elasticsearch_exceptions
from requests import exceptions as requests_exceptions
def require_elasticsearch(fn):
    """Decorator that turns Elasticsearch connection failures into no-ops.

    If the wrapped call cannot reach Elasticsearch (either the requests- or
    elasticsearch-level ConnectionError), it silently returns None instead
    of raising.
    """
    @functools.wraps(fn)
    def decorated(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except (requests_exceptions.ConnectionError,
                elasticsearch_exceptions.ConnectionError):
            return None
    return decorated
| dimagi/commcare-hq | testapps/test_elasticsearch/tests/utils.py | Python | bsd-3-clause | 421 |
from setuptools import setup
setup(name='slackclient',
version='0.15',
description='Python client for Slack.com',
url='http://github.com/slackhq/python-slackclient',
author='Ryan Huber',
author_email='ryan@slack-corp.com',
license='MIT',
packages=['slackclient'],
install_requires=[
'websocket-client',
],
zip_safe=False)
| asmithdigital/slack-sounds | setup.py | Python | mit | 391 |
# Authors: Gilles Louppe, Mathieu Blondel, Maheshakya Wijewardena
# License: BSD 3 clause
import numpy as np
from .base import SelectorMixin
from ..base import BaseEstimator, clone
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.fixes import norm
def _get_feature_importances(estimator, norm_order=1):
"""Retrieve or aggregate feature importances from estimator"""
importances = getattr(estimator, "feature_importances_", None)
if importances is None and hasattr(estimator, "coef_"):
if estimator.coef_.ndim == 1:
importances = np.abs(estimator.coef_)
else:
importances = norm(estimator.coef_, axis=0, ord=norm_order)
elif importances is None:
raise ValueError(
"The underlying estimator %s has no `coef_` or "
"`feature_importances_` attribute. Either pass a fitted estimator"
" to SelectFromModel or call fit before calling transform."
% estimator.__class__.__name__)
return importances
def _calculate_threshold(estimator, importances, threshold):
"""Interpret the threshold value"""
if threshold is None:
# determine default from estimator
est_name = estimator.__class__.__name__
if ((hasattr(estimator, "penalty") and estimator.penalty == "l1") or
"Lasso" in est_name):
# the natural default threshold is 0 when l1 penalty was used
threshold = 1e-5
else:
threshold = "mean"
if isinstance(threshold, six.string_types):
if "*" in threshold:
scale, reference = threshold.split("*")
scale = float(scale.strip())
reference = reference.strip()
if reference == "median":
reference = np.median(importances)
elif reference == "mean":
reference = np.mean(importances)
else:
raise ValueError("Unknown reference: " + reference)
threshold = scale * reference
elif threshold == "median":
threshold = np.median(importances)
elif threshold == "mean":
threshold = np.mean(importances)
else:
raise ValueError("Expected threshold='mean' or threshold='median' "
"got %s" % threshold)
else:
threshold = float(threshold)
return threshold
class SelectFromModel(BaseEstimator, SelectorMixin):
    """Meta-transformer for selecting features based on importance weights.

    .. versionadded:: 0.17

    Parameters
    ----------
    estimator : object
        The base estimator from which the transformer is built.
        This can be both a fitted (if ``prefit`` is set to True)
        or a non-fitted estimator.

    threshold : string, float, optional default None
        The threshold value to use for feature selection. Features whose
        importance is greater or equal are kept while the others are
        discarded. If "median" (resp. "mean"), then the ``threshold`` value
        is the median (resp. the mean) of the feature importances. A scaling
        factor (e.g., "1.25*mean") may also be used. If None and if the
        estimator has a parameter penalty set to l1, either explicitly
        or implicitly (e.g, Lasso), the threshold used is 1e-5.
        Otherwise, "mean" is used by default.

    prefit : bool, default False
        Whether a prefit model is expected to be passed into the constructor
        directly or not. If True, ``transform`` must be called directly
        and SelectFromModel cannot be used with ``cross_val_score``,
        ``GridSearchCV`` and similar utilities that clone the estimator.
        Otherwise train the model using ``fit`` and then ``transform`` to do
        feature selection.

    norm_order : non-zero int, inf, -inf, default 1
        Order of the norm used to filter the vectors of coefficients below
        ``threshold`` in the case where the ``coef_`` attribute of the
        estimator is of dimension 2.

    Attributes
    ----------
    estimator_ : an estimator
        The base estimator from which the transformer is built.
        This is stored only when a non-fitted estimator is passed to the
        ``SelectFromModel``, i.e when prefit is False.

    threshold_ : float
        The threshold value used for feature selection.
    """

    def __init__(self, estimator, threshold=None, prefit=False, norm_order=1):
        self.estimator = estimator
        self.threshold = threshold
        self.prefit = prefit
        self.norm_order = norm_order

    def _raise_if_prefit(self):
        # Shared guard: fitting is meaningless on an already-fitted model.
        if self.prefit:
            raise NotFittedError(
                "Since 'prefit=True', call transform directly")

    def _get_support_mask(self):
        # With a prefit estimator, transform may be called directly; else
        # fit()/partial_fit() must have stored a fitted clone first.
        if self.prefit:
            estimator = self.estimator
        elif hasattr(self, 'estimator_'):
            estimator = self.estimator_
        else:
            raise ValueError(
                'Either fit the model before transform or set "prefit=True"'
                ' while passing the fitted estimator to the constructor.')
        scores = _get_feature_importances(estimator, self.norm_order)
        self.threshold_ = _calculate_threshold(
            estimator, scores, self.threshold)
        return scores >= self.threshold_

    def fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        self._raise_if_prefit()
        self.estimator_ = clone(self.estimator)
        self.estimator_.fit(X, y, **fit_params)
        return self

    def partial_fit(self, X, y=None, **fit_params):
        """Fit the SelectFromModel meta-transformer only once.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        **fit_params : Other estimator specific parameters

        Returns
        -------
        self : object
            Returns self.
        """
        self._raise_if_prefit()
        # Clone lazily on first call so repeated partial_fit calls keep
        # training the same underlying estimator.
        if not hasattr(self, "estimator_"):
            self.estimator_ = clone(self.estimator)
        self.estimator_.partial_fit(X, y, **fit_params)
        return self
| pprett/scikit-learn | sklearn/feature_selection/from_model.py | Python | bsd-3-clause | 6,968 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
# Template-driven raw-SQL migration helper: renders files from the
# sql_templates directory with the configured PL/Proxy cluster name.
migrator = RawSQLMigration(('corehq', 'sql_proxy_accessors', 'sql_templates'), {
    'PL_PROXY_CLUSTER_NAME': settings.PL_PROXY_CLUSTER_NAME
})
class Migration(migrations.Migration):
    # Must run after the audit fixups migration in the same app.
    dependencies = [
        ('sql_proxy_accessors', '0022_audit_fixups'),
    ]
    operations = [
        # (Re)installs the function defined by the named SQL template.
        migrator.get_migration('get_multiple_forms_attachments.sql'),
    ]
| qedsoftware/commcare-hq | corehq/sql_proxy_accessors/migrations/0023_rename_get_multiple_forms_attachments.py | Python | bsd-3-clause | 548 |
from .logic import LogicAdapter
class NoKnowledgeAdapter(LogicAdapter):
    """
    A system adapter that is automatically added to the list of logic
    adapters during initialization. It is placed at the beginning of the
    list so it is given the highest priority.
    """

    def process(self, statement):
        """
        Echo the input statement back: with confidence 1 when the storage
        backend holds no known responses, and with confidence 0 otherwise.
        """
        storage_has_entries = self.context.storage.count()
        confidence = 0 if storage_has_entries else 1
        return confidence, statement
| imminent-tuba/thesis | server/chatterbot/chatterbot/adapters/logic/no_knowledge_adapter.py | Python | mit | 659 |
#!/usr/bin/python
import BaseHTTPServer
import subprocess
import os
import string
import json
import random
from urlparse import parse_qs
PORT = 8000
class APIHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    # Handles POSTed C source code (raw body, a form `code` field, or a
    # multipart file upload), runs it through the analyzer shim and
    # responds with JSON or an HTML report depending on the path.
    rbufsize = 0  # unbuffered request reads; body is read once by length
    def do_POST(self):
        # Get interesting things from the request
        self.content_length = int(self.headers.get("Content-Length","0"))
        self.content_type = self.headers.get("Content-Type","text/plain")
        self.request_body = self.rfile.read(self.content_length)
        # If a form submission we need to get the code from the `code`
        # field, URL decoding it. urlparse.parse_qs does both for us.
        if self.content_type == "application/x-www-form-urlencoded":
            d = parse_qs(self.request_body)
            self.code = d["code"][0] # FIXME?
        # If a file upload, need to parse the multipart MIME format
        elif self.content_type.startswith("multipart/form-data;"):
            self.code = get_multipart_payload(self.request_body)
        # Otherwise the code is simply the entire request body. (This
        # is the case when the POST happens when using us as a web
        # service.)
        else:
            self.code = self.request_body
        # Determine our response body
        self.json = parse_request(self.code)
        # /api/v1/analyze/ returns the raw JSON; everything else gets it
        # rendered as an HTML report.
        if self.path == "/api/v1/analyze/":
            self.response_body = self.json
        else:
            self.response_body = make_htmlpage(self.json)
        # Send our response
        self.send_response(200)
        self.send_header("Content-Length", str(len(self.response_body)))
        if self.path == "/api/v1/analyze/":
            self.send_header("Content-Type", "application/json")
        else:
            self.send_header("Content-Type", "text/html")
        self.end_headers()
        self.wfile.write(self.response_body)
    def do_GET(self):
        # Serves a small fixed set of static files; anything else is 404.
        def respond_with_file(name, content_type):
            self.send_response(200)
            with open(name) as f:
                self.response_body = f.read()
            self.send_header("Content-Length", str(len(self.response_body)))
            self.send_header("Content-Type", content_type)
            self.end_headers()
            self.wfile.write(self.response_body)
        if self.path == '/':
            respond_with_file("index.html", "text/html")
        elif self.path == '/api/v1/analyze/':
            respond_with_file("index.json", "application/json")
        elif self.path == '/favicon.ico':
            respond_with_file("favicon.ico", "image/x-icon")
        else:
            self.send_response(404)
            self.end_headers()
def get_multipart_payload(s):
    """Assume that `s` is a multipart/form-data entity consisting of a
    boundary start, zero or more headers, a blank line, and then zero
    or more 'payload' lines until a matching closing boundary. Return
    the payload.

    Note: each payload line is emitted with a preceding newline, so a
    non-empty payload always starts with '\\n' (historical behaviour,
    preserved here).
    """
    lines = s.splitlines()
    boundary = lines[0]
    # Skip the boundary line and all headers, up to the first blank line.
    pos = 0
    while pos < len(lines) and lines[pos] != "":
        pos += 1
    # Step past the blank line itself (a no-op if we ran off the end).
    pos += 1
    # Collect payload lines until the closing boundary.
    payload = ""
    while pos < len(lines) and not lines[pos].startswith(boundary):
        payload += '\n' + lines[pos]
        pos += 1
    return payload
def name_file():
    """ Create a randomized filename so the user cannot count
    on us always using the same filename. A mild measure against
    some kinds of attacks.
    """
    # string.ascii_letters replaces the Python-2-only, locale-dependent
    # string.letters: same intent, deterministic charset, Python 3 safe.
    basechars = string.ascii_letters + string.digits
    return "".join(random.choice(basechars) for _ in range(20)) + ".c"
def parse_request(request_body):
    """ take a request from the client, run the C code through
    the clang static analyzer via the shim shell script,
    return the string to be used in the response body
    """
    filename = name_file()
    # Persist the submitted code to a temp-named file for the shim.
    with open(filename, "w") as source_file:
        source_file.write(request_body)
    try:
        clang_result = subprocess.check_output(
            ["bash", "shim.sh", filename],
            stderr=subprocess.STDOUT, universal_newlines=True)
    except subprocess.CalledProcessError:
        clang_result = "shim failed!"
    os.remove(filename)
    return clang_result
def make_htmlpage(strinput):
    """Renders the analyzer's JSON result string as a small HTML report.

    `strinput` is a JSON object with a numeric "score" and an "items" list
    of {line, col, desc, body} dicts; items are listed ordered by
    (line, column).
    """
    results = json.loads(strinput)
    score = str(results["score"])
    # Sort by line, then column. The key-function tuple sort replaces the
    # old Python-2-only `cmp=` comparator (removed in Python 3) and is
    # behaviorally identical (both sorts are stable).
    resultlist = sorted(results["items"],
                        key=lambda item: (int(item["line"]), int(item["col"])))
    errorlist = "<ul>"
    for item in resultlist:
        errorlist += ("<li><b>Line {line}, column {col}</b>: {prob}<br>"
                      "in <pre><code>{snippet}</code></pre></li>").format(
            line=str(item["line"]), col=str(item["col"]),
            prob=item["desc"], snippet=item["body"])
    errorlist += "</ul>"
    return ("<html><head></head><body>Your score is: <b>{score}</b> "
            "{details}</body></html>").format(score=score, details=errorlist)
def main(server_class=BaseHTTPServer.HTTPServer,
         handler_class=APIHTTPRequestHandler):
    # Entry point: bind an HTTP server on every interface at PORT and
    # serve until the process is killed (Ctrl-C).
    server_address = ('', PORT)
    httpd = server_class(server_address, handler_class)
    print "serving at port", PORT
    httpd.serve_forever()
if __name__ == '__main__':
main()
| brainwane/secureapi | hhserver.py | Python | gpl-3.0 | 5,755 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from httpie.plugins import FormatterPlugin
from ohoh.clients import DebuggerCliClient
class Formatter(DebuggerCliClient, FormatterPlugin):
    # An httpie formatter plugin that doubles as an interactive debugger
    # client: when a debug token is present in the response, it drops into
    # a command loop instead of printing the response body.
    name = DebuggerCliClient.intro
    def __init__(self, env, **kwargs):
        # `env` is httpie's environment; keep its streams for the cmdloop.
        self.enabled = True
        self.stdout = env.stdout
        self.stderr = env.stderr
        DebuggerCliClient.__init__(self)
    def format_headers(self, headers):
        # Split the raw header block into (name, value) pairs; partition
        # keeps headers without ": " intact as (line, line).
        header_list = [
            (part[0], part[-1])
            for part in [
                hstr.partition(u": ") for hstr in
                headers.splitlines(False)
            ]
        ]
        # find_debug_header comes from DebuggerCliClient; negative index
        # means no debug header is present — show headers unchanged.
        idx = self.find_debug_header(header_list)
        if idx < 0:
            return headers
        else:
            # Truncate the displayed header
            header, val = header_list[idx]
            max_value_length = 80 - len(header) - 1
            if len(val) > max_value_length:
                truncate = u" [Truncated ({0} bytes) ...]".format(
                    len(val.strip()))
                val = val[:max_value_length - len(truncate)] + truncate
            header_list[idx] = header, val
            return u"\n".join([
                u": ".join((h, v)) for h, v in header_list
            ])
    def format_body(self, content, mime):
        # With an active debug token the body is treated as a traceback:
        # enter the interactive loop and suppress normal body output.
        if self.debug_token is not None:
            self.traceback_text = content
            self.cmdloop()
            return u""
        else:
            return content
    def preloop(self):
        # Show the captured traceback before the first interactive prompt.
        self.stderr.write(self.traceback_text)
        self.stderr.write("\n")
| te-je/ohoh | ohoh/clients/httpie.py | Python | mit | 1,602 |
import logging
import math
import threading
import time
from random import randint
from datascryer.config import log_peformance
from datascryer.helper.python import python_3, delta_ms
from datascryer.helper.time_converter import string_to_ms
from datascryer.influxdb.reader import InfluxDBReader
from datascryer.influxdb.writer import InfluxDBWriter
from datascryer.methods.method_collector import MethodCollector
if python_3():
from urllib.error import URLError
else:
from urllib2 import URLError
METHOD = 'method'
LABEL = 'label'
UPDATE_RATE = 'update_rate'
LOOKBACK_RANGE = 'lookback_range'
FORECAST_RANGE = 'forecast_range'
FORECAST_INTERVAL = 'forecast_interval'
METHOD_OPTIONS = 'methodSpecificOptions'
TIME_KEYS = [UPDATE_RATE, LOOKBACK_RANGE, FORECAST_RANGE, FORECAST_INTERVAL]
class Job(threading.Thread):
    """Per-(host, service) daemon worker.

    Periodically pulls past performance data from InfluxDB, runs the
    configured forecast / anomaly-detection method on it and writes the
    result back to InfluxDB.
    """
    def __init__(self, config):
        # config is a 3-tuple: (host, service, settings-dict).
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.__stop_event = threading.Event()
        self.__host = config[0]
        self.__service = config[1]
        self.__command = config[2]['command']
        self.__config = config[2]
        self.__update_rates = []
        # Pair each configured performance label with its per-label config,
        # skipping methods with no registered implementation class.
        for p in self.__config['perf_labels']:
            for c in self.__config['config']:
                if p == c[LABEL]:
                    if self.get_method(c) in MethodCollector.classes.keys():
                        self.__update_rates.append((string_to_ms(c[UPDATE_RATE]), c))
                    else:
                        logging.warning("for " + c[METHOD] + " does no class exist")
        # Convert every duration-valued option (e.g. "5m") to milliseconds.
        for u in self.__update_rates:
            for k, v in u[1].items():
                if k in TIME_KEYS:
                    u[1][k] = string_to_ms(v)
        # Schedule shortest update rate first.
        self.__update_rates = sorted(self.__update_rates, key=lambda x: x[0])
    def stop(self):
        # Signals run() to exit at its next wakeup.
        self.__stop_event.set()
    def run(self):
        if len(self.__update_rates) == 0:
            return
        # wait up to 120 seconds, to get some distortion
        self.__stop_event.wait(randint(0, 120))
        while not self.__stop_event.is_set():
            start = time.time()
            for update in self.__update_rates:
                rate = update[0]
                now = time.time()
                # Sleep until this label's next slot (rate is in ms); a
                # negative timeout makes wait() return immediately.
                time_to_wait = round(start - now + rate / 1000, 0)
                interrupt = self.__stop_event.wait(time_to_wait)
                if interrupt:
                    return
                try:
                    self.start_calculation(update[1])
                except URLError as e:
                    logging.getLogger(__name__).error("Could not connect to InfluxDB: " + str(e))
                except:
                    # Keep the worker alive on any method failure; details
                    # go to the log with a traceback.
                    logging.getLogger(__name__).error("Job execution failed", exc_info=True)
    def start_calculation(self, conf):
        """Fetches lookback data for `conf` and dispatches it to either the
        forecast or the anomaly-search implementation of the method."""
        start = time.time()
        lookback_data = InfluxDBReader.request_past(host=self.__host,
                                                    service=self.__service,
                                                    # command=self.__command,
                                                    performance_label=conf[LABEL],
                                                    lookback=conf[LOOKBACK_RANGE])
        if not lookback_data:
            return
        if log_peformance():
            logging.getLogger(__name__).debug(
                "Fetching data of %s %s %s: %s took %dms" % (
                    self.__host, self.__service, conf[LABEL], self.get_method(conf), delta_ms(start))
            )
        start = time.time()
        my_class = MethodCollector.classes[self.get_method(conf)]
        # Duck-typed dispatch: a method class offers either calc_forecast
        # or search_anomaly.
        if 'calc_forecast' in dir(my_class):
            forecast_data = my_class. \
                calc_forecast(options=conf[METHOD_OPTIONS],
                              forecast_start=self.calc_start_date(lookback_data[len(lookback_data) - 1][0],
                                                                  conf[FORECAST_INTERVAL]),
                              forecast_range=conf[FORECAST_RANGE],
                              forecast_interval=conf[FORECAST_INTERVAL],
                              lookback_range=conf[LOOKBACK_RANGE],
                              lookback_data=lookback_data)
            if log_peformance():
                logging.getLogger(__name__).debug(
                    "Calculation data of %s %s %s: %s took %dms" % (
                        self.__host, self.__service, conf[LABEL], self.get_method(conf), delta_ms(start))
                )
            start = time.time()
            if forecast_data:
                InfluxDBWriter.write_forecast(data=forecast_data,
                                              host=self.__host,
                                              service=self.__service,
                                              # command=self.__command,
                                              performance_label=conf[LABEL])
                if log_peformance():
                    logging.getLogger(__name__).debug(
                        "Writing data of %s %s %s: %s took %dms" % (
                            self.__host, self.__service, conf[LABEL], self.get_method(conf), delta_ms(start))
                    )
            else:
                logging.getLogger(__name__).debug(
                    "Calculation did not return any data: %s %s %s: %s" % (
                        self.__host, self.__service, conf[LABEL], self.get_method(conf))
                )
        elif 'search_anomaly' in dir(my_class):
            anomaly_data = my_class.search_anomaly(
                options=conf[METHOD_OPTIONS],
                lookback_range=conf[LOOKBACK_RANGE],
                lookback_data=lookback_data)
            if log_peformance():
                logging.getLogger(__name__).debug(
                    "Calculation data of %s %s %s: %s took %dms" % (
                        self.__host, self.__service, conf[LABEL], self.get_method(conf), delta_ms(start))
                )
            if anomaly_data:
                InfluxDBWriter.write_anomaly(data=anomaly_data,
                                             host=self.__host,
                                             service=self.__service,
                                             # command=self.__command,
                                             performance_label=conf[LABEL])
                if log_peformance():
                    logging.getLogger(__name__).debug(
                        "Writing data of %s %s %s: %s took %dms" % (
                            self.__host, self.__service, conf[LABEL], self.get_method(conf), delta_ms(start))
                    )
            else:
                logging.getLogger(__name__).debug(
                    "Calculation did not return any data: %s %s %s: %s" % (
                        self.__host, self.__service, conf[LABEL], self.get_method(conf))
                )
    @staticmethod
    def get_method(c):
        # Normalized (lower-cased) method name; the encode() branch keeps
        # Python 2 byte-string semantics.
        if python_3():
            method_name = c[METHOD]
        else:
            method_name = c[METHOD].encode('utf8')
        return str.lower(method_name)
    @staticmethod
    def calc_start_date(last_data_point, interval):
        # First interval boundary at or after the last known data point.
        return math.ceil(float(last_data_point) / interval) * interval
| Griesbacher/dataScryer | datascryer/jobs/job.py | Python | gpl-3.0 | 7,226 |
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# TODO: Is this a mixin or a base class?
import string
import iris.std_names
import iris.unit
class LimitedAttributeDict(dict):
    """A dict of CF attributes that rejects keys managed elsewhere.

    Keys naming CF properties handled by dedicated machinery (e.g.
    'standard_name', 'units', '_FillValue') may not be stored as free-form
    attributes; attempting to do so raises ValueError.
    """
    _forbidden_keys = ('standard_name', 'long_name', 'units', 'bounds', 'axis',
                       'calendar', 'leap_month', 'leap_year', 'month_lengths',
                       'coordinates', 'grid_mapping', 'climatology',
                       'cell_methods', 'formula_terms', 'compress',
                       'missing_value', 'add_offset', 'scale_factor',
                       'valid_max', 'valid_min', 'valid_range', '_FillValue')

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Check validity of keys. Iterating the dict directly replaces the
        # Python-2-only `iterkeys()`, so this also works on Python 3
        # (the file already imports six.moves compatibility helpers).
        for key in self:
            if key in self._forbidden_keys:
                raise ValueError('%r is not a permitted attribute' % key)

    def __eq__(self, other):
        # Extend equality to allow for NumPy arrays as values.
        # set(keys) comparison replaces the Python-2-only `viewkeys()`.
        match = set(self.keys()) == set(other.keys())
        if match:
            for key, value in self.items():
                match = value == other[key]
                try:
                    match = bool(match)
                except ValueError:
                    # NumPy elementwise comparison: require all equal.
                    match = match.all()
                if not match:
                    break
        return match

    def __ne__(self, other):
        return not self == other

    def __setitem__(self, key, value):
        if key in self._forbidden_keys:
            raise ValueError('%r is not a permitted attribute' % key)
        dict.__setitem__(self, key, value)

    def update(self, other, **kwargs):
        """Validate all incoming keys before delegating to dict.update."""
        # Gather incoming keys
        keys = []
        if hasattr(other, "keys"):
            keys += list(other.keys())
        else:
            keys += [k for k, v in other]
        keys += list(kwargs.keys())
        # Check validity of keys
        for key in keys:
            if key in self._forbidden_keys:
                raise ValueError('%r is not a permitted attribute' % key)
        dict.update(self, other, **kwargs)
class CFVariableMixin(object):
    # Mixin providing the common CF-style identity/metadata surface:
    # standard_name, long_name, var_name, units and attributes.
    def name(self, default='unknown'):
        """
        Returns a human-readable name.
        First it tries :attr:`standard_name`, then 'long_name', then 'var_name'
        before falling back to the value of `default` (which itself defaults to
        'unknown').
        """
        return self.standard_name or self.long_name or self.var_name or default
    def rename(self, name):
        """
        Changes the human-readable name.
        If 'name' is a valid standard name it will assign it to
        :attr:`standard_name`, otherwise it will assign it to
        :attr:`long_name`.
        """
        try:
            self.standard_name = name
            self.long_name = None
        except ValueError:
            # Not a recognised standard name: store it as the long name.
            # NOTE(review): `unicode` is Python-2-only — confirm before any
            # Python 3 migration of this module.
            self.standard_name = None
            self.long_name = unicode(name)
        # Always clear var_name when renaming.
        self.var_name = None
    @property
    def standard_name(self):
        """The standard name for the Cube's data."""
        return self._standard_name
    @standard_name.setter
    def standard_name(self, name):
        # Only names from the CF standard-name table (or None) are valid.
        if name is None or name in iris.std_names.STD_NAMES:
            self._standard_name = name
        else:
            raise ValueError('%r is not a valid standard_name' % name)
    @property
    def units(self):
        """The :mod:`~iris.unit.Unit` instance of the object."""
        return self._units
    @units.setter
    def units(self, unit):
        # Coerce strings/objects to an iris Unit.
        self._units = iris.unit.as_unit(unit)
    @property
    def var_name(self):
        """The CF variable name for the object."""
        return self._var_name
    @var_name.setter
    def var_name(self, name):
        # A CF variable name must be a non-empty, whitespace-free string
        # (or None to clear it).
        if name is not None:
            if not name:
                raise ValueError('An empty string is not a valid CF variable '
                                 'name.')
            elif set(name).intersection(string.whitespace):
                raise ValueError('{!r} is not a valid CF variable name because'
                                 ' it contains whitespace.'.format(name))
        self._var_name = name
    @property
    def attributes(self):
        return self._attributes
    @attributes.setter
    def attributes(self, attributes):
        # Wrap in LimitedAttributeDict so forbidden CF keys are rejected.
        self._attributes = LimitedAttributeDict(attributes or {})
| jkettleb/iris | lib/iris/_cube_coord_common.py | Python | lgpl-3.0 | 5,198 |
from __future__ import absolute_import
from django.forms.fields import ChoiceField
from django.forms.widgets import RadioSelect
from django.utils.translation import ugettext_lazy as _
from events.base import FormFieldPlugin, get_theme
from events.constants import (
SUBMIT_VALUE_AS_VAL,
SUBMIT_VALUE_AS_REPR
)
from events.helpers import get_select_field_choices, safe_text
from . import UID
from .forms import RadioInputForm
from .settings import SUBMIT_VALUE_AS
__title__ = 'events.contrib.plugins.form_elements.fields.radio.base'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('RadioInputPlugin',)
theme = get_theme(request=None, as_instance=True)
class RadioInputPlugin(FormFieldPlugin):
    """Radio field plugin."""
    uid = UID
    name = _("Radio")
    group = _("Fields")
    form = RadioInputForm
    def get_form_field_instances(self, request=None, form_entry=None,
                                 form_element_entries=None, **kwargs):
        """Get form field instances."""
        # Choices are stored as raw text; parse into (value, label) pairs
        # for Django's ChoiceField.
        choices = get_select_field_choices(self.data.choices)
        widget_attrs = {'class': theme.form_radio_element_html_class}
        field_kwargs = {
            'label': self.data.label,
            'help_text': self.data.help_text,
            'initial': self.data.initial,
            'required': self.data.required,
            'choices': choices,
            'widget': RadioSelect(attrs=widget_attrs),
        }
        return [(self.data.name, ChoiceField, field_kwargs)]
    def submit_plugin_form_data(self, form_entry, request, form,
                                form_element_entries=None, **kwargs):
        """Submit plugin form data/process.
        :param fobi.models.FormEntry form_entry: Instance of
            ``fobi.models.FormEntry``.
        :param django.http.HttpRequest request:
        :param django.forms.Form form:
        """
        # In case if we should submit value as is, we don't return anything.
        # In other cases, we proceed further.
        if SUBMIT_VALUE_AS != SUBMIT_VALUE_AS_VAL:
            # Get the object
            value = form.cleaned_data.get(self.data.name, None)
            # Get choices
            choices = dict(get_select_field_choices(self.data.choices))
            if value in choices:
                # Handle the submitted form value
                label = safe_text(choices.get(value))
                # Should be returned as repr
                if SUBMIT_VALUE_AS == SUBMIT_VALUE_AS_REPR:
                    value = label
                # Should be returned as mix ("label (value)")
                else:
                    value = "{0} ({1})".format(label, value)
                # Overwrite ``cleaned_data`` of the ``form`` with object
                # qualifier.
                form.cleaned_data[self.data.name] = value
        # It's critically important to return the ``form`` with updated
        # ``cleaned_data``
        return form
| mansonul/events | events/contrib/plugins/form_elements/fields/radio/base.py | Python | mit | 3,052 |
from PartyProblemSimulator.BooleanEquation.Equation import Equation
from threading import Thread
class Experiment(Thread):
    """ An experiment to be run on the Party Problem Simulator. """

    def run(self):
        """ Executes the experiment (when the thread starts) and saves results. """
        results = self._do_experiment()
        self._save_results(results)

    def _do_experiment(self):
        """ Execute the experiment and return results. Subclasses must override. """
        raise NotImplementedError("The do_experiment method of Experiment is not implemented.")

    def _test_method(self, method, no_trials, test_cases):
        """ Tests the given method with `no_trials` trials on all test cases provided.

        Returns a list of {"AES": ..., "SR": ...} dicts, one per test case.
        """
        results = []
        for test_case in test_cases:
            test_case_aes = 0
            test_case_sr = 0
            trial_count = 0
            while trial_count < no_trials:
                trial_res = self._do_trial(method(), Equation(test_case['Equation']), test_case['NumVars'])  # Do the trial
                if trial_res['Success']:  # Only add information if it was successful
                    test_case_sr = test_case_sr + 1
                    test_case_aes = test_case_aes + trial_res['Evaluations']
                trial_count = trial_count + 1
            try:
                test_case_aes = test_case_aes / test_case_sr  # Average evaluations over successes
            except ZeroDivisionError:
                test_case_aes = 0  # No success at all -> AES reported as 0
            test_case_sr = test_case_sr / no_trials  # Success ratio in [0, 1]
            results.append({
                "AES": test_case_aes,
                "SR": test_case_sr
            })
        return results

    def _do_trial(self, method, equation, variable_count):
        """ Does a single trial of the algorithm provided. """
        method.run(equation, variable_count)
        results = {}  # Build response
        results['Evaluations'] = method.get_num_evaluations()
        if (method.get_best_genome() is None) or (method.get_best_genome().evaluate(equation) == 1):  # Did the method find a solution?
            results['Success'] = True
        else:
            results['Success'] = False
        return results

    def _save_results(self, results):
        """ Saves the results of this experiment to disk.

        NOTE(review): expects each result dict to carry 'Method', 'Overall'
        and 'CaseResults' keys — a richer shape than _test_method returns;
        presumably _do_experiment implementations assemble it. Confirm.
        """
        for res in results:
            with open('PartyProblemSimulator\Experiments\Results\{}.res'.format(res['Method']), 'w') as file:  # Open file with name of method used
                file.write("METHOD NAME: {}\n".format(res['Method']))  # Output the goodies
                file.write("AES: {}\n".format(res['Overall']['AES']))
                file.write("SR: {}\n".format(res['Overall']['SR']))
                file.write("--------------------------\n")
                for case_res in res['CaseResults']:
                    file.write("Case AES: {}\t\tCase SR: {}\n".format(case_res['AES'], case_res['SR']))

    def _load_test_cases(self):
        """ Loads or creates the test cases to be used. Subclasses must override. """
        raise NotImplementedError("The _load_test_cases method of Experiment is not implemented.")

    def _calculate_results(self, results):
        """ Calculates the SR (Success Rate) and AES (Average Evaluations per Solution) based on the results given. """
        # Bug fix: this previously iterated (and divided by the length of)
        # an undefined name `method_results`, raising NameError on every
        # call; the parameter is `results`. Also guard the empty case.
        if not results:
            return {"AES": 0, "SR": 0}
        sr = 0
        aes = 0
        for result in results:
            aes = aes + result['AES']
            sr = sr + result['SR']
        aes = aes / len(results)
        sr = sr / len(results)
        return {"AES": aes, "SR": sr}
from setuptools import setup, find_packages
# Kept manually in sync with airflow.__version__
version = '1.2.0'
# Optional dependency groups, exposed below as setuptools "extras".
doc = [
    'sphinx>=1.2.3',
    'sphinx-argparse>=0.1.13',
    'sphinx-rtd-theme>=0.1.6',
    'Sphinx-PyPI-upload>=0.2.1'
]
hive = [
    'hive-thrift-py>=0.0.1',
    'pyhive>=0.1.3',
    'pyhs2>=0.6.0',
]
mysql = ['mysql-python>=1.2.5']
postgres = ['psycopg2>=2.6']
optional = ['librabbitmq>=1.6.1']
samba = ['pysmbclient>=0.1.3']
s3 = ['boto>=2.36.0']
# Convenience aggregates: every database backend, and the full dev set.
all_dbs = postgres + mysql + hive
devel = all_dbs + doc + samba + s3 + ['nose']
setup(
    name='airflow',
    description='Programmatically author, schedule and monitor data pipelines',
    version=version,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    scripts=['airflow/bin/airflow'],
    # Core runtime dependencies (always installed).
    install_requires=[
        'celery>=3.1.17',
        'chartkick>=0.4.2',
        'dill>=0.2.2',
        'flask>=0.10.1',
        'flask-admin>=1.0.9',
        'flask-cache>=0.13.1',
        'flask-login>=0.2.11',
        'flower>=0.7.3',
        'jinja2>=2.7.3',
        'markdown>=2.5.2',
        'pandas>=0.15.2',
        'pygments>=2.0.1',
        'python-dateutil>=2.3',
        'requests>=2.5.1',
        'setproctitle>=1.1.8',
        'snakebite>=2.4.13',
        'sqlalchemy>=0.9.8',
        'statsd>=3.0.1',
        'thrift>=0.9.2',
        'tornado>=4.0.2',
    ],
    # Installable via e.g. `pip install airflow[hive]`.
    extras_require={
        'all': devel + optional,
        'all_dbs': all_dbs,
        'doc': doc,
        'devel': devel,
        'hive': hive,
        'mysql': mysql,
        'postgres': postgres,
        's3': s3,
        'samba': samba,
    },
    author='Maxime Beauchemin',
    author_email='maximebeauchemin@gmail.com',
    url='https://github.com/airbnb/airflow',
    download_url=(
        'https://github.com/airbnb/airflow/tarball/' + version),
)
| smarden1/airflow | setup.py | Python | apache-2.0 | 1,834 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from djangocms_bootstrap3 import __version__
# No hard runtime requirements beyond django CMS itself.
INSTALL_REQUIRES = []
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Communications',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
]
# Packaging manifest for the djangocms-bootstrap3-grid plugin.
setup(
    name='djangocms-bootstrap3-grid',
    version=__version__,
    description='Bootstrap3 grid system plugin for django CMS',
    author='Maidakov Mikhail',
    author_email='m-email@inbox.com',
    url='https://github.com/m-mix/djangocms-bootstrap3-grid',
    packages=find_packages(exclude=[]),
    install_requires=INSTALL_REQUIRES,
    license='LICENSE.txt',
    platforms=['OS Independent'],
    classifiers=CLASSIFIERS,
    long_description=open('README.rst').read(),
    include_package_data=True,
    zip_safe=False
)
| m-mix/djangocms-bootstrap3-grid | setup.py | Python | bsd-2-clause | 1,149 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
from odoo.exceptions import UserError
class WebsiteVisitor(models.Model):
    """Extend website.visitor with an action to contact a visitor by SMS."""
    _inherit = 'website.visitor'

    def _check_for_sms_composer(self):
        """ Purpose of this method is to actualize visitor model prior to contacting
        him. Used notably for inheritance purpose, when dealing with leads that
        could update the visitor model. """
        partner = self.partner_id
        reachable = partner and (partner.mobile or partner.phone)
        return bool(reachable)

    def _prepare_sms_composer_context(self):
        """Build the default values handed to the SMS composer wizard."""
        partner = self.partner_id
        # Prefer the mobile number; fall back to the landline field.
        number_field = 'mobile' if partner.mobile else 'phone'
        return {
            'default_res_model': 'res.partner',
            'default_res_id': partner.id,
            'default_composition_mode': 'comment',
            'default_number_field_name': number_field,
        }

    def action_send_sms(self):
        """Open the SMS composer targeting this visitor's partner.

        :raises UserError: when no partner or no phone/mobile number is linked.
        """
        self.ensure_one()
        if not self._check_for_sms_composer():
            raise UserError(_("There are no contact and/or no phone or mobile numbers linked to this visitor."))
        # Merge composer defaults on top of the current environment context.
        compose_ctx = dict(self.env.context, **self._prepare_sms_composer_context())
        return {
            "name": _("Send SMS Text Message"),
            "type": "ir.actions.act_window",
            "res_model": "sms.composer",
            "view_mode": 'form',
            "context": compose_ctx,
            "target": "new",
        }
| jeremiahyan/odoo | addons/website_sms/models/website_visitor.py | Python | gpl-3.0 | 1,530 |
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.core.urlresolvers import reverse_lazy
from django import shortcuts
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon import forms
import json
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
from crystal_dashboard.dashboards.crystal.rings.storage_policies import models as storage_policies_models
from crystal_dashboard.api import swift as api
class MyFilterAction(tables.FilterAction):
    """Client-side filter box shared by the tables in this module."""
    name = "myfilter"
class CreateStoragePolicy(tables.LinkAction):
    """Table action opening the "create replication policy" modal form."""
    name = "create_storage_policy"
    verbose_name = _("Create Replication Policy")
    url = "horizon:crystal:rings:storage_policies:create_storage_policy"
    classes = ("ajax-modal",)
    icon = "plus"
    def allowed(self, request, policies):
        # Only available once the table has rows (policies already loaded).
        return len(self.table.get_rows()) > 0
class CreateECStoragePolicy(tables.LinkAction):
    """Table action opening the "create erasure-coding policy" modal form."""
    name = "create_ec_storage_policy"
    verbose_name = _("Create EC Policy")
    url = "horizon:crystal:rings:storage_policies:create_ec_storage_policy"
    classes = ("ajax-modal",)
    icon = "plus"
    def allowed(self, request, policies):
        # Mirrors CreateStoragePolicy: only shown when the table is non-empty.
        return len(self.table.get_rows()) > 0
class LoadSwiftPolicies(tables.Action):
    """Import the storage policies currently defined in Swift.

    Counterpart of the create actions: only offered while the table is empty.
    """
    name = "load_swift_policies"
    verbose_name = _("Load Swift Policies")
    requires_input = False
    icon = "upload"
    def allowed(self, request, policies):
        # Only makes sense before any policy has been loaded.
        return len(self.table.get_rows()) == 0
    def single(self, data_table, request, object_id):
        try:
            response = api.load_swift_policies(request)
            # Any 2xx status is treated as success.
            if 200 <= response.status_code < 300:
                messages.success(request, _("Storage Policies loaded successfully"))
                return shortcuts.redirect('horizon:crystal:rings:index')
            else:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            error_message = "Unable to load policies.\t %s" % ex.message
            exceptions.handle(request, _(error_message), redirect=redirect)
class ManageDisksLink(tables.LinkAction):
    """Row action linking to the device-management view of one policy."""
    name = "users"
    verbose_name = _("Manage Devices")
    url = "horizon:crystal:rings:storage_policies:devices"
    icon = "pencil"
    def get_link_url(self, datum=None):
        # URL needs the policy id of the row this action is attached to.
        return reverse(self.url, kwargs={'policy_id': self.datum.id})
class UpdateCell(tables.UpdateAction):
    """Inline-edit handler for the editable cells of StoragePolicyTable."""
    def allowed(self, request, project, cell):
        # Every editable cell may be updated by any user of this panel.
        return True
    def update_cell(self, request, datum, obj_id, cell_name, new_cell_value):
        # Push the single changed field to the controller; report success to
        # the table framework via the boolean return value.
        try:
            api.swift_edit_storage_policy(request, obj_id, {cell_name: new_cell_value})
        except Exception:
            exceptions.handle(request, ignore=True)
            return False
        return True
class UpdateRow(tables.Row):
    """Ajax row refresh: re-fetch one policy and rebuild its row datum."""
    ajax = True
    def get_data(self, request, obj_id):
        response = api.swift_storage_policy_detail(request, obj_id)
        inst = json.loads(response.text)
        # Flatten every non-structural field into a "Key:value" summary string
        # shown in the "Parameters" column.
        parameters = ', '.join([key.replace('_', ' ').title()+':'+inst[key] for key in inst.keys() if key not in ['id', 'name', 'policy_type', 'default', 'devices', 'deprecated', 'deployed']])
        policy = storage_policies_models.StoragePolicy(inst['storage_policy_id'], inst['name'], inst['policy_type'],
                                                       inst['default'], parameters, inst['deprecated'], inst['deployed'], inst['devices'])
        return policy
class UpdateStoragePolicy(tables.LinkAction):
    """Row action opening the edit-policy modal form."""
    name = "update"
    verbose_name = _("Edit")
    icon = "pencil"
    classes = ("ajax-modal", "btn-update",)
    def get_link_url(self, datum=None):
        base_url = reverse("horizon:crystal:rings:storage_policies:update_storage_policy", kwargs={'id': datum.id})
        return base_url
class DeleteStoragePolicy(tables.DeleteAction):
    """Row action deleting a single storage policy via the controller API."""
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Storage Policy",
            u"Delete Storage Policy",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Storage Policy",
            u"Deleted Storage Policy",
            count
        )
    name = "delete_storage_policy"
    success_url = "horizon:crystal:rings:index"
    def delete(self, request, obj_id):
        try:
            response = api.swift_delete_storage_policy(request, obj_id)
            if 200 <= response.status_code < 300:
                # Success is intentionally silent; the table refresh shows it.
                pass
                # messages.success(request, _("Successfully deleted controller: %s") % obj_id)
            else:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            error_message = "Unable to remove storage policy.\t %s" % ex.message
            exceptions.handle(request, _(error_message), redirect=redirect)
class DeleteMultipleStoragePolicies(DeleteStoragePolicy):
    """Batch variant of DeleteStoragePolicy used as a table-level action."""
    name = "delete_multiple_storage_policies"
class DeployChanges(tables.BatchAction):
    """Row action pushing a not-yet-deployed policy out to the Swift cluster."""
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Deploy Changes",
            u"Deploy Changes",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Changes deployed",
            u"Changes deployed",
            count
        )
    name = "add"
    icon = "plus"
    requires_input = True
    # NOTE(review): BatchAction normally reloads the current table; confirm
    # this unreversed success_url string is actually consumed anywhere.
    success_url = "horizon:crystal:rings:index"
    def allowed(self, request, storage_policy):
        # Hide the button once the policy is already deployed.
        return not storage_policy.deployed
    def action(self, request, obj_id):
        try:
            response = api.deploy_storage_policy(request, obj_id)
            if not 200 <= response.status_code < 300:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            error_message = "Unable to deploy storage policy.\t %s" % ex.message
            exceptions.handle(request, _(error_message), redirect=redirect)
class StoragePolicyTable(tables.DataTable):
    """Main listing of Swift storage policies with inline-editable cells."""
    id = tables.Column('id', verbose_name=_("ID"))
    name = tables.Column('name', verbose_name=_("Name"))
    type = tables.Column('type', verbose_name=_("Type"))
    # 'default' and 'deprecated' are editable in place through UpdateCell.
    default = tables.Column('default', verbose_name=_("Default"),
                            form_field=forms.ChoiceField(choices=[('True', _('True')), ('False', _('False'))]), update_action=UpdateCell)
    parameters = tables.Column('parameters', verbose_name=_("Parameters"))
    deprecated = tables.Column('deprecated', verbose_name=_("Deprecated"),
                               form_field=forms.ChoiceField(choices=[('True', _('True')), ('False', _('False'))]), update_action=UpdateCell)
    devices = tables.Column('devices', verbose_name=_("Devices"))
    deployed = tables.Column('deployed', verbose_name=_("Deployed"))
    class Meta:
        name = "storagepolicies"
        verbose_name = _("Storage Policies")
        table_actions = (MyFilterAction, CreateStoragePolicy, CreateECStoragePolicy, LoadSwiftPolicies, DeleteMultipleStoragePolicies,)
        row_actions = (DeployChanges, ManageDisksLink, UpdateStoragePolicy, DeleteStoragePolicy)
        row_class = UpdateRow
class AddDisk(tables.LinkAction):
    """Table action opening the add-device modal for the current policy."""
    name = "add_disk"
    verbose_name = _("Add Device")
    url = "horizon:crystal:rings:storage_policies:add_devices"
    classes = ("ajax-modal",)
    icon = "plus"
    def get_link_url(self, datum=None):
        # The table's URL kwargs already carry the policy_id to target.
        return reverse(self.url, kwargs=self.table.kwargs)
class DeleteDisk(tables.DeleteAction):
    """Row action detaching a device from the policy being managed."""
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete disk",
            u"Delete disks",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Disk deleted",
            u"Disks deleted",
            count
        )
    name = "delete"
    success_url = "horizon:crystal:rings:storage_policies:devices"
    def delete(self, request, obj_id):
        policy_id = self.table.kwargs['policy_id']
        try:
            response = api.swift_remove_disk_storage_policy(request, policy_id, obj_id)
            if not 200 <= response.status_code < 300:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            # NOTE(review): this reverse() omits the policy_id argument that
            # get_success_url supplies below -- confirm the URL pattern
            # resolves without it.
            redirect = reverse("horizon:crystal:rings:storage_policies:devices")
            error_message = "Unable to remove disk.\t %s" % ex.message
            exceptions.handle(request, _(error_message), redirect=redirect)
    def get_success_url(self, request=None):
        policy_id = self.table.kwargs.get('policy_id', None)
        return reverse(self.success_url, args=[policy_id])
class ManageDisksTable(tables.DataTable):
    """Devices currently assigned to a policy (per-policy device view)."""
    storage_node = tables.WrappingColumn('storage_node', verbose_name=_('Storage Node'))
    region = tables.Column('region', verbose_name="Region")
    zone = tables.Column('zone', verbose_name="Zone")
    device = tables.Column('device', verbose_name="Device")
    size_occupied = tables.Column('size_occupied', verbose_name="Size Occupied")
    size = tables.Column('size', verbose_name="Total Size")
    class Meta(object):
        name = "diskstable"
        verbose_name = _("Devices")
        table_actions = (MyFilterAction, AddDisk, DeleteDisk)
class AddDisksAction(tables.BatchAction):
    """Batch action attaching the selected available devices to a policy."""
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Add Device",
            u"Add Devices",
            count
        )
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Added Device",
            u"Added Devices",
            count
        )
    name = "add"
    icon = "plus"
    requires_input = True
    success_url = "horizon:crystal:rings:storage_policies:devices"
    def action(self, request, obj_id):
        policy_id = self.table.kwargs['policy_id']
        try:
            response = api.swift_add_disk_storage_policy(request, policy_id, obj_id)
            if not 200 <= response.status_code < 300:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            # NOTE(review): reverse() without the policy_id arg, same pattern
            # as DeleteDisk.delete -- confirm it resolves.
            redirect = reverse("horizon:crystal:rings:storage_policies:devices")
            error_message = "Unable to add disk.\t %s" % ex.message
            exceptions.handle(request, _(error_message), redirect=redirect)
    def get_success_url(self, request=None):
        policy_id = self.table.kwargs.get('policy_id', None)
        return reverse(self.success_url, args=[policy_id])
class AddDisksTable(ManageDisksTable):
    """Same columns as ManageDisksTable, but listing attachable devices."""
    class Meta(object):
        name = "add_devices_table"
        verbose_name = _("Devices")
        table_actions = (MyFilterAction, AddDisksAction,)
| Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/rings/storage_policies/tables.py | Python | gpl-3.0 | 10,995 |
import logging
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from product_spiders.items import Product, ProductLoader
class TECLIMA_spider(BaseSpider):
    """Scrape product name/price/url from the TEKCLIMA eBay store.

    The spider counts the items advertised by the store and, via the
    spider_idle signal, restarts itself (up to 5 tries) until it has scraped
    at least that many items.
    NOTE(review): informational progress is logged with logging.error
    throughout -- presumably to force visibility; consider logging.info.
    """
    name = 'TEKCLIMA.ebay'
    allowed_domains = ['www.ebay.it', 'stores.ebay.it', 'ebay.it', 'ebay.com']
    start_urls = ('http://stores.ebay.it/TEKCLIMA',)
    # Class-level state shared across the crawl (single-instance spider).
    scraped_urls = []   # NOTE(review): appended to but never read -- confirm intent
    items_count = 0     # total items the store claims to have
    items_scraped = 0   # items actually emitted so far
    pages_count = 0     # listing pages derived from items_count (30 per page)
    tries = 0           # restart attempts triggered from spider_idle
    def __init__(self, *a, **kw):
        super(TECLIMA_spider, self).__init__(*a, **kw)
        # Hook scrapy signals to drive the retry-until-complete loop.
        dispatcher.connect(self.spider_idle, signals.spider_idle)
        dispatcher.connect(self.item_scraped, signals.item_scraped)
    def spider_idle(self, spider):
        # When the crawl drains, re-seed the start URL if the scraped total
        # is still short of the advertised count (bounded to 5 tries).
        logging.error("Total count: %d" % self.items_count)
        logging.error("Items scraped: %d" % self.items_scraped)
        if (self.items_count > self.items_scraped) and (self.tries < 5):
            logging.error("Not all scraped: found %d of %d" % (self.items_scraped, self.items_count))
            request = Request(self.start_urls[0], dont_filter=True)
            self._crawler.engine.crawl(request, self)
        else:
            logging.error("Scraped %d of %d. The rest are duplicates" % (self.items_scraped, self.items_count))
            logging.error("Finished on %d try" % self.tries)
    def item_scraped(self, item, response, spider):
        # Count only items emitted by this spider instance.
        if spider == self:
            self.items_scraped += 1
    def parse_product(self, response):
        """Extract products from one store listing page."""
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//td[@id="CentralArea"]//td[@class="details"]')
        logging.error("Debug. %s. Found %d products" % (response.url, len(products)))
        for p in products:
            try:
                name = p.select('./div[1]/a/@title')[0].extract()
            except IndexError:
                continue
            try:
                url = p.select('./div[1]/a/@href')[0].extract()
            except IndexError:
                continue
            # Price cell position varies between listing layouts; try both.
            price = p.select('./div[3]/table/tr[1]/td[2]/span/text()').re(r'([0-9\.\, ]+)')
            if not price:
                price = p.select('./div[2]/table/tr[1]/td[2]/span/text()').re(r'([0-9\.\, ]+)')
            self.scraped_urls.append(url)
            product_loader = ProductLoader(item=Product(), response=response)
            product_loader.add_value('name', name.strip())
            # Italian number format: strip thousands dots, comma -> decimal point.
            product_loader.add_value('price', price[0].replace(".", "").replace(",", "."))
            product_loader.add_value('url', url)
            yield product_loader.load_item()
    def parse(self, response):
        """Entry point: read the store's item count, then fan out per page."""
        self.tries += 1
        logging.error("Try %d" % self.tries)
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        base_url = get_base_url(response)
        items_count = hxs.select("//span[@class='smuy']/span[@class='countClass']/text()").extract()
        if not items_count:
            logging.error("Items count not found!")
            return
        self.items_count = int(items_count[0].replace(".", ""))
        # The store paginates 30 items per page.
        self.pages_count = self.items_count / 30 + 1
        #pages
        for i in range(1, self.pages_count + 1):
            url = "http://stores.ebay.it/TEKCLIMA/_i.html?_pgn=" + str(i)
            yield Request(url, dont_filter=True, callback=self.parse_product)
| 0--key/lib | portfolio/Python/scrapy/rosario/tekclima.py | Python | apache-2.0 | 3,568 |
#!/usr/bin/env python3
# Thin CLI entry point: delegate to the deploy job defined in deploy_stack,
# which parses its own command-line arguments.
from deploy_stack import deploy_job
if __name__ == '__main__':
    deploy_job()
| freyes/juju | acceptancetests/deploy_job.py | Python | agpl-3.0 | 103 |
# Project Euler 63: how many n-digit positive integers are also an nth power?
# Only bases 1..9 can qualify (10**n always has n+1 digits), and since
# len(str(9**22)) == 21 the exponent never needs to exceed 21.
count = sum(
    1
    for exponent in range(1, 22)
    for base in range(1, 10)
    if len(str(base ** exponent)) == exponent
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This file is part of manageR
Copyright (C) 2008-9 Carson J. Q. Farmer
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public Licence as published by the Free Software
Foundation; either version 2 of the Licence, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more
details.
You should have received a copy of the GNU General Public Licence along with
this program (see LICENSE file in install directory); if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
Portions of the console and EditR window, as well as several background
funtions are based on the Sandbox python gui of Mark Summerfield.
Copyright (C) 2007-9 Mark Summerfield. All rights reserved.
Released under the terms of the GNU General Public License.
The plugins functinality is based largely on the PostGisTools plugin of Mauricio de Paulo.
Copyright (C) 2009 Mauricio de Paulo. All rights reserved.
Released under the terms of the GNU General Public License.
manageR makes extensive use of rpy2 (Laurent Gautier) to communicate with R.
Copyright (C) 2008-9 Laurent Gautier.
Rpy2 may be used under the terms of the GNU General Public License.
'''
from PyQt4.QtCore import *
from PyQt4.QtGui import *
#from qgis.core import *
from qgis.gui import *
import resources
class Plugin:
    """QGIS plugin shim: adds a toolbar button that launches manageR."""
    def __init__(self, iface, version):
        # iface: QgisInterface handed in by QGIS; version: plugin version string.
        self.iface = iface
        self.version = version
    def initGui(self):
        """Create the toolbar action and wire it to run()."""
        self.action = QAction(QIcon( ":icon.png" ), "manageR", self.iface.mainWindow())
        self.action.setWhatsThis("Interface to the R statistical programming language")
        self.action.setStatusTip("Interface to the R statistical programming language")
        QObject.connect(self.action, SIGNAL("triggered()"), self.run)
        # Add to the main toolbar
        self.iface.addToolBarIcon(self.action)
    def unload(self):
        """Remove the toolbar button when the plugin is disabled."""
        self.iface.removeToolBarIcon(self.action)
    def run(self):
        """Open the manageR main window (imported lazily to speed QGIS start)."""
        from manageR_updated import MainWindow
        MainWindow(self.iface.mainWindow(), iface=self.iface, console=True).show()
| karstenv/manageR | plugin.py | Python | gpl-2.0 | 2,416 |
"""
Test incoming call handling - reject a call because we're busy, and for no
reason.
"""
from twisted.words.xish import xpath
from servicetest import make_channel_proxy, EventPattern, call_async
from jingletest2 import JingleTest2, test_all_dialects
import constants as cs
def test_busy(jp, q, bus, conn, stream):
    # Variant: reject the incoming call with GC_REASON_BUSY.
    test(jp, q, bus, conn, stream, True)
def test_no_reason(jp, q, bus, conn, stream):
    # Variant: reject the incoming call without giving a reason.
    test(jp, q, bus, conn, stream, False)
def test(jp, q, bus, conn, stream, busy):
    """Receive a Jingle call and reject it, checking signals and the wire.

    When ``busy`` is True the call is removed with GC_REASON_BUSY (after
    first checking that a nonsensical reason is refused); otherwise it is
    removed with plain RemoveMembers. In both cases the emitted
    MembersChanged/Closed signals and the outgoing session-terminate stanza
    are verified.
    """
    remote_jid = 'foo@bar.com/Foo'
    jt = JingleTest2(jp, conn, q, stream, 'test@localhost', remote_jid)
    jt.prepare()
    self_handle = conn.GetSelfHandle()
    remote_handle = conn.RequestHandles(cs.HT_CONTACT, [remote_jid])[0]
    # Remote end calls us
    jt.incoming_call()
    # FIXME: these signals are not observable by real clients, since they
    # happen before NewChannels.
    # The caller is in members
    e = q.expect('dbus-signal', signal='MembersChanged',
             args=[u'', [remote_handle], [], [], [], 0, 0])
    # We're pending because of remote_handle
    e = q.expect('dbus-signal', signal='MembersChanged',
             args=[u'', [], [], [self_handle], [], remote_handle,
                   cs.GC_REASON_INVITED])
    # S-E gets notified about new session handler, and calls Ready on it
    e = q.expect('dbus-signal', signal='NewSessionHandler')
    assert e.args[1] == 'rtp'
    session_handler = make_channel_proxy(conn, e.args[0], 'Media.SessionHandler')
    session_handler.Ready()
    media_chan = make_channel_proxy(conn, e.path, 'Channel.Interface.Group')
    # Exercise channel properties
    channel_props = media_chan.GetAll(cs.CHANNEL,
            dbus_interface=cs.PROPERTIES_IFACE)
    assert channel_props['TargetHandle'] == remote_handle
    assert channel_props['TargetHandleType'] == 1
    assert channel_props['TargetID'] == 'foo@bar.com'
    assert channel_props['Requested'] == False
    assert channel_props['InitiatorID'] == 'foo@bar.com'
    assert channel_props['InitiatorHandle'] == remote_handle
    if busy:
        # First, try using a reason that doesn't make any sense
        call_async(q, media_chan, 'RemoveMembersWithReason',
            [self_handle], "what kind of a reason is Separated?!",
            cs.GC_REASON_SEPARATED)
        e = q.expect('dbus-error', method='RemoveMembersWithReason')
        assert e.error.get_dbus_name() == cs.INVALID_ARGUMENT
        # Now try a more sensible reason.
        media_chan.RemoveMembersWithReason([self_handle],
            "which part of 'Do Not Disturb' don't you understand?",
            cs.GC_REASON_BUSY)
    else:
        media_chan.RemoveMembers([self_handle], 'rejected')
    # Rejection must terminate the session, empty the group and close the
    # channel -- all three are expected.
    iq, mc, _ = q.expect_many(
        EventPattern('stream-iq',
            predicate=jp.action_predicate('session-terminate')),
        EventPattern('dbus-signal', signal='MembersChanged'),
        EventPattern('dbus-signal', signal='Closed'),
        )
    _, added, removed, lp, rp, actor, reason = mc.args
    assert added == [], added
    assert set(removed) == set([self_handle, remote_handle]), \
        (removed, self_handle, remote_handle)
    assert lp == [], lp
    assert rp == [], rp
    assert actor == self_handle, (actor, self_handle)
    if busy:
        assert reason == cs.GC_REASON_BUSY, reason
    else:
        assert reason == cs.GC_REASON_NONE, reason
    if jp.is_modern_jingle():
        # Modern Jingle carries the rejection reason on the wire too.
        jingle = iq.query
        if busy:
            r = "/jingle/reason/busy"
        else:
            r = "/jingle/reason/cancel"
        assert xpath.queryForNodes(r, jingle) is not None, (jingle.toXml(), r)
if __name__ == '__main__':
    # Run both rejection variants against every supported Jingle dialect.
    test_all_dialects(test_busy)
    test_all_dialects(test_no_reason)
| community-ssu/telepathy-gabble | tests/twisted/jingle/test-incoming-call-reject.py | Python | lgpl-2.1 | 3,735 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.log')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
| poznyakandrey/oslo.service | oslo_service/_i18n.py | Python | apache-2.0 | 1,125 |
# -*- coding: utf-8 -*-
import os
import sys
from dp_tornado.engine.engine import Engine as dpEngine
from dp_tornado.engine.bootstrap import Bootstrap as EngineBootstrap
from dp_tornado.engine.testing import Testing as dpTesting
from dp_tornado.version import __version_info__
class CliHandler(dpEngine):
    """Command-line front end for dp: parses argv and dispatches the
    ``init`` / ``run`` / ``test`` sub-commands."""
    def __init__(self):
        import argparse
        parser = argparse.ArgumentParser()
        # (argument name, add_argument kwargs) pairs registered below.
        args = [
            ['action', {'nargs': 1}],
            ['options', {'nargs': '*'}],
            ['--identifier', {'help': 'Identifier'}],
            ['--debug', {'help': 'Debug Mode, If this value specified `yes` then enabled.'}],
            ['--dryrun', {'help': 'Dryrun, If this value specified `yes` then enabled.'}],
            ['--template', {'help': 'Template Name', 'default': 'helloworld'}],
            ['--logging', {'help': 'Logging'}],
            ['--path', {'help': 'App Path'}],
            ['--ini', {'help': 'INI File Path'}],
            ['--port', {'help': 'Binding port', 'type': int}]
        ]
        # Auto reload for Debugging mode.
        # DP_CLI_ARGV carries the original argv across the re-exec performed
        # by the debug auto-reloader ('|' separated).
        if 'DP_CLI_ARGV' in os.environ:
            sys.argv = os.environ['DP_CLI_ARGV'].split('|')
        for e in args:
            parser.add_argument(e[0], **e[1])
        self.parser = parser
        self.args, self.args_unknown = parser.parse_known_args()
        self.cwd = self.helper.io.path.cwd()
        if self.args.debug and not self.args.dryrun:
            os.environ['DP_CLI_ARGV'] = '|'.join(sys.argv)
    def main(self):
        """Resolve --path from positional options, then dispatch the action."""
        self.logging.info('------------------------')
        self.logging.info('* dp for Python v%s' % '.'.join([str(e) for e in __version_info__]))
        self.logging.info('------------------------')
        # Treat the first positional option that is an existing (or creatable)
        # path as the app path when --path was not given explicitly.
        for e in self.args.options:
            if not self.args.path:
                path = self.helper.io.path.join(self.cwd, e)
                if self.helper.io.path.is_file(path) or \
                        (self.helper.io.path.mkdir(path) and self.helper.io.path.is_dir(path)):
                    self.args.path = e
        if self.args.action and self.args.action[0] == 'init':
            self.command_init()
        elif self.args.action and self.args.action[0] == 'run':
            self.command_run()
        elif self.args.action and self.args.action[0] == 'test':
            self.command_test()
        else:
            self.logging.info('* dp4p finished, unrecognized action.')
            import sys
            self.logging.info('%s' % sys.argv)
            return exit(1)
        return exit(0)
    def command_init(self):
        """Scaffold a new app from a template into an empty target directory."""
        installable = True
        init_path = self._pathified()
        self.logging.info('* Initializing app .. %s' % init_path)
        if self.helper.io.path.is_dir(init_path):
            browse = self.helper.io.path.browse(init_path)
            # Hidden entries (dotfiles) do not count against emptiness.
            browse = [e for e in browse if not self.helper.io.path.split(e)[1].startswith('.')]
            if len(browse) > 0:
                status = 'Not Empty'
                installable = False
            else:
                status = 'Empty'
        elif self.helper.io.path.is_file(init_path):
            status = 'File'
            installable = False
        else:
            self.helper.io.path.mkdir(init_path)
            if not self.helper.io.path.is_dir(init_path):
                status = 'Permission Denied'
                installable = False
            else:
                status = 'Empty'
        if not installable:
            self.logging.info('* Initialization failed, %s' % status)
            return exit(1)
        engine_path = self.helper.io.path.dirname(__file__)
        application_path = init_path
        # template initialization.
        if not EngineBootstrap.init_template(
                engine_path=engine_path, application_path=application_path, template_name=self.args.template):
            self.logging.info('* Initialization failed.')
            return exit(1)
        self.logging.info('* Initialization succeed.')
    def command_run(self):
        """Import the target app package and invoke its module-level run()."""
        init_path = self._pathified()
        init_py = init_path
        executable = True
        if self.helper.io.path.is_dir(init_py):
            init_py = '%s/__init__.py' % init_py
        self.logging.info('* Running app .. %s' % init_py)
        if not self.helper.io.path.is_file(init_py):
            executable = False
        if not executable:
            self.logging.info('* Running failed, Not executable path.')
            return exit(1)
        app_module = self._append_sys_path(init_path)
        import sys
        __import__(app_module)
        app = sys.modules[app_module] if app_module in sys.modules else None
        app_run = getattr(app, 'run', None) if app else None
        if not app_run:
            self.logging.info('* Running failed, Invalid app.')
            return exit(1)
        # Strip the CLI's own positional arguments so the app sees clean argv.
        for i in range(len(self.args.options) + 1):
            sys.argv.pop(1)
        try:
            app_run(self)
        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.logging.exception(e)
    def command_test(self):
        """Discover tests, run them against a temporary server, report result."""
        tester = dpTesting(self._append_sys_path(), self._pathified())
        if not tester.traverse():
            self.logging.info('* Testing failed.')
            return exit(1)
        disable_logging = True
        if self.args.logging in ('enable', 'true', '1', 'yes', 'on'):
            disable_logging = False
        tester.server_start(disable_logging=disable_logging)
        tested = tester.run()
        tester.server_stop()
        if not tested:
            self.logging.info('* Testing failed.')
        else:
            self.logging.info('* Testing succeed.')
        if not tested:
            return exit(1)
    def _append_sys_path(self, init_path=None):
        """Walk up past __init__.py files to find the package root, register
        it on sys.path and return the dotted module name of the app."""
        dirpath = init_path or self._pathified()
        modules = []
        while True:
            dirpath, module = self.helper.io.path.split(dirpath)
            modules.append(module)
            if not self.helper.io.path.is_file(self.helper.io.path.join(dirpath, '__init__.py')):
                break
        app_module = '.'.join(modules[::-1])
        self.helper.io.path.sys.insert(0, dirpath)
        self.helper.io.path.sys.insert(1, init_path)
        return app_module
    def _pathified(self):
        """Return the absolute app path from --path (or cwd), normalized:
        no trailing slash, and __init__.py collapsed to its directory."""
        path = self.helper.io.path.join(self.cwd, self.args.path) if self.args.path else self.cwd
        path = path[:-1] if path.endswith('/') else path
        if path.endswith('__init__.py'):
            path = self.helper.io.path.dirname(path)
        return path
# Module-level singleton; ``main`` below is the console-script entry point.
cli = CliHandler()
def main(as_module=False):
    # ``as_module`` distinguishes ``python -m`` invocation; currently unused.
    cli.main()
if __name__ == '__main__':
    main(as_module=True)
| why2pac/dp-tornado | dp_tornado/cli.py | Python | mit | 6,717 |
# -*- coding:utf-8 -*-
import http.cookiejar
import time
import urllib
import urllib.request
from datetime import date
from datetime import datetime
from io import BytesIO
from urllib.parse import urlparse
from PIL import Image
from bs4 import BeautifulSoup
def get_captcha():
    """Download the login captcha, enlarge it 5x and show it to the user.

    Network I/O: fetches the image through the global cookie-aware opener so
    the captcha is bound to the current session.
    """
    data = urllib.request.urlopen('https://curricula.bfsu.edu.cn/academic/getCaptcha.do')
    cap1 = Image.open(BytesIO(data.read()))
    wide, high = cap1.size
    # Scale up so the distorted characters are readable on screen.
    cap1 = cap1.resize((wide * 5, high * 5))
    cap1.show()
    return
def str_len(string):
    """Return how many extra bytes *string* occupies in GBK versus its
    character count, i.e. the number of double-width (CJK) characters.

    Used to compensate column widths when printing aligned tables that mix
    ASCII and Chinese text.
    """
    return len(string.encode('gbk')) - len(string)
def getscore(t):
    """Fetch and pretty-print the score table for one academic term.

    ``t`` is either a short/empty string (meaning "current term") or a
    string like ``'2016A'`` / ``'2016S'`` where the trailing letter selects
    the term. Academic year ids are offset from 1980 on the server side.
    Returns -1 on bad input, otherwise None (output goes to stdout).
    """
    if len(t) <= 2:
        # No explicit term: derive year/term from today's date.
        t = date.today()
        if 11 >= int(t.strftime('%m')) >= 7:
            yearid = str(int(t.strftime('%Y')) - 1980)
            termid = '1'
        else:
            yearid = str(int(t.strftime('%Y')) - 1981)
            termid = '2'
        # print(yearID,termID)
    else:
        yearid = str(int(t[:4]) - 1980)
        termid = t[-1].lower()
        # 'a' = autumn -> server term 2, 's' = spring -> server term 1.
        if termid == 'a':
            termid = '2'
        elif termid == 's':
            termid = '1'
        else:
            print('Illegal input format.\n')
            return -1
        # print(yearID,termID)
    print('当前查询时间:' + str(int(yearid) + 1980), '第', termid, '学期')
    postdata = {'year': yearid, 'term': termid, 'para': '0', 'sortColumn': '', 'Submit': '查询'}
    postdata = urllib.parse.urlencode(postdata).encode('utf-8')
    postu = 'https://curricula.bfsu.edu.cn/academic/manager/score/studentOwnScore.do?groupId=&moduleId=2020'
    requ = urllib.request.Request(postu, postdata, headers)
    # print(request)
    re = urllib.request.urlopen(requ)
    content = re.read().decode('utf-8')
    # print(text)
    score = BeautifulSoup(content, 'html.parser')
    score = score.find('table', {'class': 'datalist'})
    if score is None:
        print('无成绩!\n\n')
        return
    # print(score)
    # Fixed-width table header; widths below are tuned together with
    # str_len() so CJK cells stay aligned.
    print('=' * 92)
    print('|{:^2}|{:^2}|{:^5}|{:^25}|{:^3}|{:^4}|{:^3}|{:^2}|{:^4}|{:^4}|'.format('学年', '学期', '课程号', '课程名', '课序号', '总评',
                                                                                  '学分', '学时', '考试性质', '及格标志'))
    print('-' * 92)
    for tab in score.find_all('tr'):
        # Column index selects the format; only the last column (i == 9)
        # ends the printed line.
        for i, tdd in enumerate(tab.find_all('td')):
            if i == 0:
                content = '|{:^4}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 1:
                content = '{:^3}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 2:
                content = '{:<8}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 3:
                # format='{:<15}|'.format(tdd.string.strip())
                # print (format,end="")
                # Course name mixes CJK/ASCII: pad to a display width of 28
                # by subtracting the double-width character count.
                print('%-*s|' % (int(28 - str_len(tdd.string.strip())), tdd.string.strip()), end="")
            elif i == 4:
                content = '{:^6}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 5:
                content = '{:^6}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 6:
                content = '{:^5}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 7:
                content = '{:^4}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 8:
                content = '{:^4}|'.format(tdd.string.strip())
                print(content, end="")
            elif i == 9:
                content = '{:^6}|'.format(tdd.string.strip())
                print(content)
            else:
                # raise
                pass
    print('\nScores have been got.\n')
    return
def loginselect():
    """Poll the course-selection gateway until the selection page is served.

    Returns True once the response contains the selection prompt.
    NOTE(review): the loop has no sleep and never gives up, so it busy-polls
    the server indefinitely when selection is closed; ``print(re)`` looks
    like a debugging leftover.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1',
              'Referer': '''https://curricula.bfsu.edu.cn/
                academic/student/selectcoursedb/jumppage.jsp?
                groupId=&moduleId=2050'''}
    req = urllib.request.Request('https://curricula.bfsu.edu.cn/academic/manager/electcourse/mgspage.do',
                                 headers=header)
    while True:
        re = urllib.request.urlopen(req)
        print(re)
        re = re.read().decode('utf-8')
        if re.find('选课提示') != -1:
            return True
def getuserid():
    """Fetch the hidden ``checkUserid`` token from the course-selection page.

    The token is required by the quick-select POST. Returns the token string,
    ``'ServerError'`` when the server replies with a ServletException (the
    selection system is likely closed), or ``None`` when no token is present.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1',
              'Referer': 'https://curricula.bfsu.edu.cn/academic/manager/electcourse/stusced.do'}
    req = urllib.request.Request('https://curricula.bfsu.edu.cn/academic/manager/electcourse/stusced.do#fastsc',
                                 headers=header)
    res0 = urllib.request.urlopen(req).read()
    try:
        res = res0.decode()
    except UnicodeDecodeError:
        # Error pages come back GBK-encoded instead of UTF-8; fall back so we
        # can still inspect the server's message.
        # (Was a bare ``except:``, narrowed to the decode failure it handles.)
        print('连接失败...尝试获取错误信息...')
        res = res0.decode('gbk')
        if res.find('ServletException') >= 0:
            print('选课系统可能已关闭\n错误信息如下\n--------------------\n%s\n====================' % res0)
            return 'ServerError'
    #print('res:',res)
    html = BeautifulSoup(res, 'html.parser')
    user = html.find('input', attrs={'type': 'hidden', 'name': 'checkUserid'})
    if user is None:
        return None
    return user['value']
def quickselect(course, se='1'):
    """POST one course-selection request and report the server's verdict.

    Relies on the module-global ``userid`` (obtained via getuserid()) and on
    the cookie-aware opener installed at module level.

    Args:
        course: the course id (``pcourseid``) to select.
        se: course sequence number; defaults to '1'.

    Returns:
        ``(fla, ms)`` — ``fla`` is an int status flag parsed from the
        response (callers treat 1 as success), ``ms`` is the server's
        message string.

    NOTE(review): the parsing below depends on the exact line layout of the
    inline <script> in the response page — verify against the live site
    before changing anything here.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1',
              'Referer': 'https://curricula.bfsu.edu.cn/academic/manager/electcourse/stusced.do'}
    postdata = {'pcourseid': course, 'seq': se, 'checkUserid': userid, 'Submit': '选课'}
    postdata = urllib.parse.urlencode(postdata).encode('utf-8')
    req = urllib.request.Request('https://curricula.bfsu.edu.cn/academic/manager/electcourse/scaddaction.do', postdata,
                                 header)
    res = urllib.request.urlopen(req).read().decode('utf-8')
    # print('\n\n\n',res)
    res = BeautifulSoup(res, 'html.parser')
    # Status flag and message are embedded in the first inline script block.
    res = res.center.body.find('script').string.strip().split('\n', 7)
    ms = res[4].split(r'"')[1]  # message: first double-quoted string on line 5
    fla = int(res[5].split('=')[1][0])  # flag: single digit after '=' on line 6
    # print(msg)
    return fla, ms
hosturl = 'https://jwc.bfsu.edu.cn'
# NOTE(review): the trailing space in this URL is preserved as-is — the
# server has evidently accepted it, but confirm it is intentional.
posturl = 'https://curricula.bfsu.edu.cn/academic/j_acegi_security_check '

# Install a cookie-aware opener so the login session persists across all
# subsequent urllib requests in this module.
cj = http.cookiejar.LWPCookieJar()
cookie_support = urllib.request.HTTPCookieProcessor(cj)
opener = urllib.request.build_opener(cookie_support, urllib.request.HTTPHandler)
urllib.request.install_opener(opener)
h = urllib.request.urlopen(hosturl)

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1',
           'Referer': 'https://jwc.bfsu.edu.cn'}

# ---- login loop: ask for credentials + captcha until the server accepts ----
while True:
    username = input('ID:')
    password = input('Password:')
    captcha = ''
    while not captcha:
        get_captcha()
        captcha = input('请输入图中所示验证码,看不清请直接回车:')
    if captcha != '':
        postData = dict(groupId='', j_username=username, j_password=password,
                        j_captcha=captcha, button1='登陆')
        postData = urllib.parse.urlencode(postData).encode('utf-8')
        request = urllib.request.Request(posturl, postData, headers)
        response = urllib.request.urlopen(request)
        text = response.read().decode('gbk')
        # A response still containing '验证码' means we are back on the
        # login form, i.e. authentication failed.
        if text.find('验证码') == -1:
            print('Login succeed.\n等待时间1分钟')
            break
        else:
            print('Login failed.\n')

# ---- main command menu ----
while True:
    comm = input('\n主菜单_等待指令\n')
    if comm == 'score':
        print('\n----查询成绩')
        while True:
            print('输入查询学期,可直接回车查询默认的上学期成绩;\n否则请输入具体时间格式如\'2014a\'(2014年秋季)、\'2015s\'(2015年春季)。(输入内容不包括单引号)\n返回上一层菜单请输入\'exit\'')
            # BUG FIX: this variable used to be named `time`, which shadowed
            # the `time` module and broke time.sleep() in the 'wait' branch
            # once a score query had been run.
            term = input('查询学期:')
            if term == 'exit':
                break
            getscore(term)
    elif comm == 'select':
        print('\n----单次选课')
        userid = getuserid()
        if userid == 'ServerError':
            continue
        while True:
            courseid = input('请输入课程编号,返回请输入exit ')
            if courseid == 'exit':
                break
            seq = input('若知课序号请输入课程序号,否则请直接回车 ')
            if seq == '':
                print(quickselect(courseid)[1])
            else:
                print(quickselect(courseid, seq)[1])
    elif comm == 'wait':
        print('\n----循环询问')
        userid = getuserid()
        if userid == 'ServerError':
            continue
        courseid = input('请输入课程编号,返回请输入exit ')
        if courseid == 'exit':
            continue
        seq = input('若知课序号请输入课程序号,否则请直接回车 ')
        n = 1
        inter = input('请输入两次查询间的时间间隔,默认为0.1(秒) ')
        if inter == '':
            inter = 0.1
        else:
            inter = float(inter)
        print('循环询问开始')
        # An empty seq falls back to quickselect's default sequence number;
        # both cases now share one polling loop (previously duplicated).
        select_args = (courseid,) if seq == '' else (courseid, seq)
        while True:
            flag, msg = quickselect(*select_args)
            if flag == 1:
                print('\n第', n, '次尝试成功。请登录验证。课程号', courseid, '于', datetime.now())
                break
            n += 1
            print('当前第', n, '次尝试,状态', msg, end='\r')
            time.sleep(inter)
    elif comm == 't':
        loginselect()
        print(getuserid())
    elif comm == 'exit':
        break
    else:
        print('无效的指令')
| Whotakesmyname/Login-BFSUjwc | login.py | Python | mit | 10,713 |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pycountry
from pants.contrib.awslambda.python.examples.hello_lib import say_hello
def handler(event, context):
    """AWS Lambda entry point: greet from the United States.

    Looks up the country name for ISO alpha-2 code 'US' via pycountry and
    passes it to say_hello().  ``event`` and ``context`` are the standard
    Lambda arguments and are unused.
    """
    country = pycountry.countries.get(alpha_2='US')
    say_hello('from the {}'.format(country.name))
| wisechengyi/pants | src/python/pants/backend/awslambda/python/examples/hello_handler.py | Python | apache-2.0 | 343 |
# Problem: Reverse String
# Difficulty: Easy
# Category: String
# Leetcode 344: https://leetcode.com/problems/reverse-string/#/description
# Description:
"""
Write a function that takes a string as input and returns the string reversed.
Example:
Given s = "hello", return "olleh".
"""
class Solution(object):
    """Two ways to reverse a string (LeetCode 344)."""

    def reverse_string_1(self, s):
        """Reverse ``s`` without slicing.

        Uses ``''.join`` over the reversed iterator, which is O(n); the
        previous character-by-character ``result += s[i]`` loop was
        quadratic in the worst case.
        """
        if not s:
            return s
        return ''.join(reversed(s))

    def reverse_string_2(self, s):
        """Reverse ``s`` using slice notation."""
        return s[::-1]
# Quick manual check of both implementations.
obj = Solution()
test_1 = 'hello'
test_2 = 'gsterssh5e'
print(obj.reverse_string_1(test_1))
print(obj.reverse_string_2(test_1))
print(obj.reverse_string_1(test_2))
# BUG FIX: the last line called reverse_string_1 twice; it clearly meant to
# exercise the second implementation on test_2 as well.
print(obj.reverse_string_2(test_2))
| rush2catch/algorithms-leetcode | Basic Data Structures/string/leet_344_ReverseString.py | Python | mit | 706 |
from django.db import models as dbmodels, connection
from django.utils import datastructures
from autotest.frontend.afe import model_logic, readonly_connection
_quote_name = connection.ops.quote_name
class TempManager(model_logic.ExtendedManager):
    """Manager helpers for grouped queries.

    Adds GROUP BY emulation, per-group counting and group counting on top of
    Django querysets, executing the generated SQL against the read-only
    database connection.
    """
    # Alias used for the per-group count column in generated SQL.
    _GROUP_COUNT_NAME = 'group_count'

    def _get_key_unless_is_function(self, field):
        """Return ``field`` untouched if it looks like a SQL function call,
        otherwise qualify it as a column of this manager's table."""
        if '(' in field:
            return field
        return self.get_key_on_this_table(field)

    def _get_field_names(self, fields, extra_select_fields=None):
        """Map field names to SQL select expressions.

        ``extra_select_fields`` maps a field name to a tuple whose first
        element is the SQL to use; other fields become qualified column
        names.  (Default changed from a mutable ``{}`` literal to ``None``:
        the old default was only ever read, but a shared mutable default is
        an accident waiting to happen.)
        """
        if extra_select_fields is None:
            extra_select_fields = {}
        field_names = []
        for field in fields:
            if field in extra_select_fields:
                field_names.append(extra_select_fields[field][0])
            else:
                field_names.append(self._get_key_unless_is_function(field))
        return field_names

    def _get_group_query_sql(self, query, group_by):
        """Return (sql, params) for ``query`` with a GROUP BY clause spliced
        in just before any trailing ORDER BY."""
        compiler = query.query.get_compiler(using=query.db)
        sql, params = compiler.as_sql()

        # insert GROUP BY clause into query
        group_fields = self._get_field_names(group_by, query.query.extra_select)
        group_by_clause = ' GROUP BY ' + ', '.join(group_fields)
        group_by_position = sql.rfind('ORDER BY')
        if group_by_position == -1:
            group_by_position = len(sql)
        sql = (sql[:group_by_position] +
               group_by_clause + ' ' +
               sql[group_by_position:])
        return sql, params

    def _get_column_names(self, cursor):
        """
        Gets the column names from the cursor description. This method exists
        so that it can be mocked in the unit test for sqlite3 compatibility.
        """
        return [column_info[0] for column_info in cursor.description]

    def execute_group_query(self, query, group_by):
        """
        Performs the given query grouped by the fields in group_by with the
        given query's extra select fields added. Returns a list of dicts, where
        each dict corresponds to single row and contains a key for each grouped
        field as well as all of the extra select fields.
        """
        sql, params = self._get_group_query_sql(query, group_by)
        cursor = readonly_connection.connection().cursor()
        cursor.execute(sql, params)
        field_names = self._get_column_names(cursor)
        row_dicts = [dict(zip(field_names, row)) for row in cursor.fetchall()]
        return row_dicts

    def get_count_sql(self, query):
        """
        Get the SQL to properly select a per-group count of unique matches for
        a grouped query. Returns a tuple (field alias, field SQL)
        """
        if query.query.distinct:
            pk_field = self.get_key_on_this_table()
            count_sql = 'COUNT(DISTINCT %s)' % pk_field
        else:
            count_sql = 'COUNT(1)'
        return self._GROUP_COUNT_NAME, count_sql

    def _get_num_groups_sql(self, query, group_by):
        """Return (sql, params) selecting the distinct group tuples of
        ``query``; the row count of that result is the number of groups."""
        group_fields = self._get_field_names(group_by, query.query.extra_select)
        query = query.order_by()  # this can mess up the query and isn't needed
        compiler = query.query.get_compiler(using=query.db)
        sql, params = compiler.as_sql()
        from_ = sql[sql.find(' FROM'):]
        return ('SELECT DISTINCT %s %s' % (','.join(group_fields),
                                           from_),
                params)

    def _cursor_rowcount(self, cursor):
        """To be stubbed by tests"""
        return cursor.rowcount

    def get_num_groups(self, query, group_by):
        """
        Returns the number of distinct groups for the given query grouped by the
        fields in group_by.
        """
        sql, params = self._get_num_groups_sql(query, group_by)
        cursor = readonly_connection.connection().cursor()
        cursor.execute(sql, params)
        return self._cursor_rowcount(cursor)
class Machine(dbmodels.Model):
    '''
    A machine used to run a test
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: machine. This is the primary key for the resulting table created
    #: from this model.
    machine_idx = dbmodels.AutoField(primary_key=True)
    #: The name, such as a FQDN, of the machine that run the test. Must be
    #: unique.
    hostname = dbmodels.CharField(unique=True, max_length=255)
    #: the machine group
    machine_group = dbmodels.CharField(blank=True, max_length=240)
    #: the machine owner
    owner = dbmodels.CharField(blank=True, max_length=240)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_machines'
class Kernel(dbmodels.Model):
    '''
    The Linux Kernel used during a test
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: kernel. This is the primary key for the resulting table created
    #: from this model.
    kernel_idx = dbmodels.AutoField(primary_key=True)
    #: the kernel hash (populated by the parser; not editable through forms)
    kernel_hash = dbmodels.CharField(max_length=105, editable=False)
    #: base kernel version string
    base = dbmodels.CharField(max_length=90)
    #: human-readable kernel description
    printable = dbmodels.CharField(max_length=300)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_kernels'
class Patch(dbmodels.Model):
    '''
    A Patch applied to a Linux Kernel source during the build process
    '''
    #: A reference to a :class:`Kernel`
    kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
    #: A descriptive name for the patch
    name = dbmodels.CharField(blank=True, max_length=240)
    #: The URL where the patch was fetched from
    url = dbmodels.CharField(blank=True, max_length=900)
    #: patch hash; stored in the legacy 'hash' column ('hash' shadows the
    #: Python builtin, hence the different attribute name)
    the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_patches'
class Status(dbmodels.Model):
    '''
    The possible results of a test

    These objects are populated automatically from a
    :ref:`fixture file <django:initial-data-via-fixtures>`
    '''
    #: A numeric and automatic integer that uniquely identifies a given
    #: status. This is the primary key for the resulting table created
    #: from this model.
    status_idx = dbmodels.AutoField(primary_key=True)
    #: A short descriptive name for the status. This exact name is searched for
    #: while the TKO parser is reading and parsing status files
    word = dbmodels.CharField(max_length=30)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_status'
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """
    A test job, having one or many tests and their results
    """
    # primary key
    job_idx = dbmodels.AutoField(primary_key=True)
    # unique job tag, e.g. the results-directory style identifier
    tag = dbmodels.CharField(unique=True, max_length=100)
    label = dbmodels.CharField(max_length=300)
    # name of the user who submitted the job
    username = dbmodels.CharField(max_length=240)
    machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
    # job lifecycle timestamps (all optional)
    queued_time = dbmodels.DateTimeField(null=True, blank=True)
    started_time = dbmodels.DateTimeField(null=True, blank=True)
    finished_time = dbmodels.DateTimeField(null=True, blank=True)
    #: If this job was scheduled through the AFE application, this points
    #: to the related :class:`autotest.frontend.afe.models.Job` object
    afe_job_id = dbmodels.IntegerField(null=True, default=None)

    objects = model_logic.ExtendedManager()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_jobs'
class JobKeyval(dbmodels.Model):
    """Arbitrary key/value metadata attached to a :class:`Job`."""
    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_job_keyvals'
class Test(dbmodels.Model, model_logic.ModelExtensions,
           model_logic.ModelWithAttributes):
    """A single test run within a :class:`Job`, with its result status."""
    test_idx = dbmodels.AutoField(primary_key=True)
    job = dbmodels.ForeignKey(Job, db_column='job_idx')
    # name of the test that was run
    test = dbmodels.CharField(max_length=300)
    # results subdirectory for this test within the job's results
    subdir = dbmodels.CharField(blank=True, max_length=300)
    kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
    status = dbmodels.ForeignKey(Status, db_column='status')
    # failure reason text, if any
    reason = dbmodels.CharField(blank=True, max_length=3072)
    machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
    finished_time = dbmodels.DateTimeField(null=True, blank=True)
    started_time = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()

    def _get_attribute_model_and_args(self, attribute):
        # Hook for ModelWithAttributes: attributes set through the generic
        # interface are stored as user-created TestAttribute rows.
        return TestAttribute, dict(test=self, attribute=attribute,
                                   user_created=True)

    def set_attribute(self, attribute, value):
        """Set a test attribute, refusing to overwrite parser-created ones."""
        # ensure non-user-created attributes remain immutable
        try:
            TestAttribute.objects.get(test=self, attribute=attribute,
                                      user_created=False)
            raise ValueError('Attribute %s already exists for test %s and is '
                             'immutable' % (attribute, self.test_idx))
        except TestAttribute.DoesNotExist:
            super(Test, self).set_attribute(attribute, value)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_tests'
class TestAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Key/value attribute attached to a :class:`Test`."""
    test = dbmodels.ForeignKey(Test, db_column='test_idx')
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)
    # True if set by a user (mutable); False if created by the parser
    # (treated as immutable by Test.set_attribute)
    user_created = dbmodels.BooleanField(default=False)

    objects = model_logic.ExtendedManager()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_test_attributes'
class IterationAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """String attribute recorded for one iteration of a :class:`Test`."""
    # this isn't really a primary key, but it's necessary to appease Django
    # and is harmless as long as we're careful
    test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
    iteration = dbmodels.IntegerField()
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(blank=True, max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_iteration_attributes'
class IterationResult(dbmodels.Model, model_logic.ModelExtensions):
    """Numeric result recorded for one iteration of a :class:`Test`."""
    # see comment on IterationAttribute regarding primary_key=True
    test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
    iteration = dbmodels.IntegerField()
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.FloatField(null=True, blank=True)

    objects = model_logic.ExtendedManager()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_iteration_result'
class TestLabel(dbmodels.Model, model_logic.ModelExtensions):
    """A user-defined label that can be applied to many tests."""
    name = dbmodels.CharField(max_length=80, unique=True)
    description = dbmodels.TextField(blank=True)
    tests = dbmodels.ManyToManyField(Test, blank=True,
                                     db_table='tko_test_labels_tests')

    # attribute used by ModelExtensions for name-based lookups
    name_field = 'name'
    objects = model_logic.ExtendedManager()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_test_labels'
class SavedQuery(dbmodels.Model, model_logic.ModelExtensions):
    """A named, per-user saved frontend query (stored as a URL token)."""
    # TODO: change this to foreign key once DBs are merged
    owner = dbmodels.CharField(max_length=80)
    name = dbmodels.CharField(max_length=100)
    url_token = dbmodels.TextField()

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_saved_queries'
class EmbeddedGraphingQuery(dbmodels.Model, model_logic.ModelExtensions):
    """A cached, embeddable graphing query and its rendered PNG."""
    url_token = dbmodels.TextField(null=False, blank=False)
    graph_type = dbmodels.CharField(max_length=16, null=False, blank=False)
    params = dbmodels.TextField(null=False, blank=False)
    last_updated = dbmodels.DateTimeField(null=False, blank=False,
                                          editable=False)
    # refresh_time shows the time at which a thread is updating the cached
    # image, or NULL if no one is updating the image. This is used so that only
    # one thread is updating the cached image at a time (see
    # graphing_utils.handle_plot_request)
    refresh_time = dbmodels.DateTimeField(editable=False)
    cached_png = dbmodels.TextField(editable=False)

    class Meta:
        # Legacy TKO table name; must match the existing database schema.
        db_table = 'tko_embedded_graphing_queries'
# views
class TestViewManager(TempManager):
    """Manager for :class:`TestView` that knows how to graft label,
    attribute, iteration and job-keyval joins onto the base view query.

    Most methods return a *new* queryset; callers must use the return value.
    The generated SQL aliases (e.g. ``test_attribute_<name>``) are part of
    the frontend's query contract — do not rename them casually.
    """

    def get_query_set(self):
        query = super(TestViewManager, self).get_query_set()

        # add extra fields to selects, using the SQL itself as the "alias"
        extra_select = dict((sql, sql)
                            for sql in self.model.extra_fields.iterkeys())
        return query.extra(select=extra_select)

    def _get_include_exclude_suffix(self, exclude):
        # Alias suffix distinguishing include-joins from exclude-joins.
        if exclude:
            return '_exclude'
        return '_include'

    def _add_attribute_join(self, query_set, join_condition,
                            suffix=None, exclude=False):
        # Join tko_test_attributes with the given extra condition.
        if suffix is None:
            suffix = self._get_include_exclude_suffix(exclude)
        return self.add_join(query_set, 'tko_test_attributes',
                             join_key='test_idx',
                             join_condition=join_condition,
                             suffix=suffix, exclude=exclude)

    def _add_label_pivot_table_join(self, query_set, suffix, join_condition='',
                                    exclude=False, force_left_join=False):
        # Join the test<->label many-to-many pivot table.
        return self.add_join(query_set, 'tko_test_labels_tests',
                             join_key='test_id',
                             join_condition=join_condition,
                             suffix=suffix, exclude=exclude,
                             force_left_join=force_left_join)

    def _add_label_joins(self, query_set, suffix=''):
        # Two-step join: pivot table first, then the labels table itself.
        query_set = self._add_label_pivot_table_join(
            query_set, suffix=suffix, force_left_join=True)

        # since we're not joining from the original table, we can't use
        # self.add_join() again
        second_join_alias = 'tko_test_labels' + suffix
        second_join_condition = ('%s.id = %s.testlabel_id' %
                                 (second_join_alias,
                                  'tko_test_labels_tests' + suffix))
        query_set.query.add_custom_join('tko_test_labels',
                                        second_join_condition,
                                        query_set.query.LOUTER,
                                        alias=second_join_alias)
        return query_set

    def _get_label_ids_from_names(self, label_names):
        """Return a dict mapping each requested label name to its id;
        raises ValueError if any name does not exist."""
        label_ids = list(  # listifying avoids a double query below
            TestLabel.objects.filter(name__in=label_names)
            .values_list('name', 'id'))

        if len(label_ids) < len(set(label_names)):
            raise ValueError('Not all labels found: %s' %
                             ', '.join(label_names))

        return dict(name_and_id for name_and_id in label_ids)

    def _include_or_exclude_labels(self, query_set, label_names, exclude=False):
        # Deprecated include_labels/exclude_labels support.
        label_ids = self._get_label_ids_from_names(label_names).itervalues()
        suffix = self._get_include_exclude_suffix(exclude)
        condition = ('tko_test_labels_tests%s.testlabel_id IN (%s)' %
                     (suffix,
                      ','.join(str(label_id) for label_id in label_ids)))
        return self._add_label_pivot_table_join(query_set,
                                                join_condition=condition,
                                                suffix=suffix,
                                                exclude=exclude)

    def _add_custom_select(self, query_set, select_name, select_sql):
        return query_set.extra(select={select_name: select_sql})

    def _add_select_value(self, query_set, alias):
        # Select "<alias>.value" under the name <alias>.
        return self._add_custom_select(query_set, alias,
                                       _quote_name(alias) + '.value')

    def _add_select_ifnull(self, query_set, alias, non_null_value):
        # Select non_null_value when the joined row exists, NULL otherwise.
        # NOTE(review): IF() is MySQL-specific syntax.
        select_sql = "IF(%s.id IS NOT NULL, '%s', NULL)" % (_quote_name(alias),
                                                            non_null_value)
        return self._add_custom_select(query_set, alias, select_sql)

    def _join_test_label_column(self, query_set, label_name, label_id):
        alias = 'test_label_' + label_name
        label_query = TestLabel.objects.filter(name=label_name)
        query_set = Test.objects.join_custom_field(query_set, label_query,
                                                   alias)
        query_set = self._add_select_ifnull(query_set, alias, label_name)
        return query_set

    def _join_test_label_columns(self, query_set, label_names):
        # One join per requested label; also validates the names exist.
        label_id_map = self._get_label_ids_from_names(label_names)
        for label_name in label_names:
            query_set = self._join_test_label_column(query_set, label_name,
                                                     label_id_map[label_name])
        return query_set

    def _join_test_attribute(self, query_set, attribute, alias=None,
                             extra_join_condition=None):
        """
        Join the given TestView QuerySet to TestAttribute. The resulting query
        has an additional column for the given attribute named
        "attribute_<attribute name>".
        """
        if not alias:
            alias = 'test_attribute_' + attribute
        attribute_query = TestAttribute.objects.filter(attribute=attribute)
        if extra_join_condition:
            attribute_query = attribute_query.extra(
                where=[extra_join_condition])
        query_set = Test.objects.join_custom_field(query_set, attribute_query,
                                                   alias)
        query_set = self._add_select_value(query_set, alias)
        return query_set

    def _join_machine_label_columns(self, query_set, machine_label_names):
        # Machine labels are stored as a comma-separated list in the
        # 'host-labels' test attribute; match each name with FIND_IN_SET.
        for label_name in machine_label_names:
            alias = 'machine_label_' + label_name
            condition = "FIND_IN_SET('%s', %s)" % (
                label_name, _quote_name(alias) + '.value')
            query_set = self._join_test_attribute(
                query_set, 'host-labels',
                alias=alias, extra_join_condition=condition)
            query_set = self._add_select_ifnull(query_set, alias, label_name)
        return query_set

    def _join_one_iteration_key(self, query_set, result_key, first_alias=None):
        """Join one IterationResult key; returns (query_set, alias)."""
        alias = 'iteration_result_' + result_key
        iteration_query = IterationResult.objects.filter(attribute=result_key)
        if first_alias:
            # after the first join, we need to match up iteration indices,
            # otherwise each join will expand the query by the number of
            # iterations and we'll have extraneous rows
            iteration_query = iteration_query.extra(
                where=['%s.iteration = %s.iteration'
                       % (_quote_name(alias), _quote_name(first_alias))])

        query_set = Test.objects.join_custom_field(query_set, iteration_query,
                                                   alias, left_join=False)

        # select the iteration value and index for this join
        query_set = self._add_select_value(query_set, alias)
        if not first_alias:
            # for first join, add iteration index select too
            query_set = self._add_custom_select(
                query_set, 'iteration_index',
                _quote_name(alias) + '.iteration')

        return query_set, alias

    def _join_iteration_results(self, test_view_query_set, result_keys):
        """Join the given TestView QuerySet to IterationResult for one result.

        The resulting query looks like a TestView query but has one row per
        iteration.  Each row includes all the attributes of TestView, an
        attribute for each key in result_keys and an iteration_index attribute.

        We accomplish this by joining the TestView query to IterationResult
        once per result key.  Each join is restricted on the result key (and on
        the test index, like all one-to-many joins).  For the first join, this
        is the only restriction, so each TestView row expands to a row per
        iteration (per iteration that includes the key, of course).  For each
        subsequent join, we also restrict the iteration index to match that of
        the initial join.  This makes each subsequent join produce exactly one
        result row for each input row.  (This assumes each iteration contains
        the same set of keys.  Results are undefined if that's not true.)
        """
        if not result_keys:
            return test_view_query_set

        query_set, first_alias = self._join_one_iteration_key(
            test_view_query_set, result_keys[0])
        for result_key in result_keys[1:]:
            query_set, _ = self._join_one_iteration_key(query_set, result_key,
                                                        first_alias=first_alias)
        return query_set

    def _join_job_keyvals(self, query_set, job_keyvals):
        # One JobKeyval join per requested key.
        for job_keyval in job_keyvals:
            alias = 'job_keyval_' + job_keyval
            keyval_query = JobKeyval.objects.filter(key=job_keyval)
            query_set = Job.objects.join_custom_field(query_set, keyval_query,
                                                      alias)
            query_set = self._add_select_value(query_set, alias)
        return query_set

    def _join_iteration_attributes(self, query_set, iteration_attributes):
        # One IterationAttribute join per requested attribute.
        for attribute in iteration_attributes:
            alias = 'iteration_attribute_' + attribute
            attribute_query = IterationAttribute.objects.filter(
                attribute=attribute)
            query_set = Test.objects.join_custom_field(query_set,
                                                       attribute_query, alias)
            query_set = self._add_select_value(query_set, alias)
        return query_set

    def get_query_set_with_joins(self, filter_data):
        """
        Add joins for querying over test-related items.

        These parameters are supported going forward:
        * test_attribute_fields: list of attribute names. Each attribute will
                be available as a column attribute_<name>.value.
        * test_label_fields: list of label names. Each label will be available
                as a column label_<name>.id, non-null iff the label is present.
        * iteration_result_fields: list of iteration result names. Each
                result will be available as a column iteration_<name>.value.
                Note that this changes the semantics to return iterations
                instead of tests -- if a test has multiple iterations, a row
                will be returned for each one. The iteration index is also
                available as iteration_<name>.iteration.
        * machine_label_fields: list of machine label names. Each will be
                available as a column machine_label_<name>.id, non-null iff the
                label is present on the machine used in the test.
        * job_keyval_fields: list of job keyval names. Each value will be
                available as a column job_keyval_<name>.id, non-null iff the
                keyval is present in the AFE job.
        * iteration_attribute_fields: list of iteration attribute names. Each
                attribute will be available as a column
                iteration_attribute<name>.id, non-null iff the attribute is
                present.

        These parameters are deprecated:
        * include_labels
        * exclude_labels
        * include_attributes_where
        * exclude_attributes_where

        Additionally, this method adds joins if the following strings are
        present in extra_where (this is also deprecated):
        * test_labels
        * test_attributes_host_labels
        """
        query_set = self.get_query_set()

        # Note: the supported parameters are pop()ed off filter_data so the
        # generic filtering machinery downstream never sees them.
        test_attributes = filter_data.pop('test_attribute_fields', [])
        for attribute in test_attributes:
            query_set = self._join_test_attribute(query_set, attribute)

        test_labels = filter_data.pop('test_label_fields', [])
        query_set = self._join_test_label_columns(query_set, test_labels)

        machine_labels = filter_data.pop('machine_label_fields', [])
        query_set = self._join_machine_label_columns(query_set, machine_labels)

        iteration_keys = filter_data.pop('iteration_result_fields', [])
        query_set = self._join_iteration_results(query_set, iteration_keys)

        job_keyvals = filter_data.pop('job_keyval_fields', [])
        query_set = self._join_job_keyvals(query_set, job_keyvals)

        iteration_attributes = filter_data.pop('iteration_attribute_fields', [])
        query_set = self._join_iteration_attributes(query_set,
                                                    iteration_attributes)

        # everything that follows is deprecated behavior
        joined = False

        extra_where = filter_data.get('extra_where', '')
        if 'tko_test_labels' in extra_where:
            query_set = self._add_label_joins(query_set)
            joined = True

        include_labels = filter_data.pop('include_labels', [])
        exclude_labels = filter_data.pop('exclude_labels', [])
        if include_labels:
            query_set = self._include_or_exclude_labels(query_set,
                                                        include_labels)
            joined = True
        if exclude_labels:
            query_set = self._include_or_exclude_labels(query_set,
                                                        exclude_labels,
                                                        exclude=True)
            joined = True

        include_attributes_where = filter_data.pop('include_attributes_where',
                                                   '')
        exclude_attributes_where = filter_data.pop('exclude_attributes_where',
                                                   '')
        if include_attributes_where:
            query_set = self._add_attribute_join(
                query_set,
                join_condition=self.escape_user_sql(include_attributes_where))
            joined = True
        if exclude_attributes_where:
            query_set = self._add_attribute_join(
                query_set,
                join_condition=self.escape_user_sql(exclude_attributes_where),
                exclude=True)
            joined = True

        if not joined:
            # no one-to-many joins were added, so DISTINCT is unnecessary
            filter_data['no_distinct'] = True

        if 'tko_test_attributes_host_labels' in extra_where:
            query_set = self._add_attribute_join(
                query_set, suffix='_host_labels',
                join_condition='tko_test_attributes_host_labels.attribute = '
                               '"host-labels"')

        return query_set

    def query_test_ids(self, filter_data, apply_presentation=True):
        """Return the list of test_idx values matching filter_data."""
        query = self.model.query_objects(filter_data,
                                         apply_presentation=apply_presentation)
        dicts = query.values('test_idx')
        return [item['test_idx'] for item in dicts]

    def query_test_label_ids(self, filter_data):
        """Return the ids of all labels attached to the matching tests."""
        query_set = self.model.query_objects(filter_data)
        query_set = self._add_label_joins(query_set, suffix='_list')
        rows = self._custom_select_query(query_set, ['tko_test_labels_list.id'])
        return [row[0] for row in rows if row[0] is not None]

    def escape_user_sql(self, sql):
        # In addition to base escaping, qualify bare test_idx references
        # with this manager's table so user WHERE clauses stay unambiguous.
        sql = super(TestViewManager, self).escape_user_sql(sql)
        return sql.replace('test_idx', self.get_key_on_this_table('test_idx'))
class TestView(dbmodels.Model, model_logic.ModelExtensions):
    """Read-only denormalized view joining tests, jobs, machines, kernels
    and statuses.

    Backed by the SQL view ``tko_test_view_2`` (``managed = False``), so rows
    can only be queried — save() and delete() deliberately raise.
    """
    # extra selectable SQL expressions, keyed by SQL, valued by display name
    extra_fields = {
        'DATE(job_queued_time)': 'job queued day',
        'DATE(test_finished_time)': 'test finished day',
    }

    # fields on which grouped queries may group
    group_fields = [
        'test_name',
        'status',
        'kernel',
        'hostname',
        'job_tag',
        'job_name',
        'platform',
        'reason',
        'job_owner',
        'job_queued_time',
        'DATE(job_queued_time)',
        'test_started_time',
        'test_finished_time',
        'DATE(test_finished_time)',
    ]

    test_idx = dbmodels.IntegerField('test index', primary_key=True)
    job_idx = dbmodels.IntegerField('job index', null=True, blank=True)
    test_name = dbmodels.CharField(blank=True, max_length=90)
    subdir = dbmodels.CharField('subdirectory', blank=True, max_length=180)
    kernel_idx = dbmodels.IntegerField('kernel index')
    status_idx = dbmodels.IntegerField('status index')
    reason = dbmodels.CharField(blank=True, max_length=3072)
    machine_idx = dbmodels.IntegerField('host index')
    test_started_time = dbmodels.DateTimeField(null=True, blank=True)
    test_finished_time = dbmodels.DateTimeField(null=True, blank=True)
    job_tag = dbmodels.CharField(blank=True, max_length=300)
    job_name = dbmodels.CharField(blank=True, max_length=300)
    job_owner = dbmodels.CharField('owner', blank=True, max_length=240)
    job_queued_time = dbmodels.DateTimeField(null=True, blank=True)
    job_started_time = dbmodels.DateTimeField(null=True, blank=True)
    job_finished_time = dbmodels.DateTimeField(null=True, blank=True)
    afe_job_id = dbmodels.IntegerField(null=True)
    hostname = dbmodels.CharField(blank=True, max_length=300)
    platform = dbmodels.CharField(blank=True, max_length=240)
    machine_owner = dbmodels.CharField(blank=True, max_length=240)
    kernel_hash = dbmodels.CharField(blank=True, max_length=105)
    kernel_base = dbmodels.CharField(blank=True, max_length=90)
    kernel = dbmodels.CharField(blank=True, max_length=300)
    status = dbmodels.CharField(blank=True, max_length=30)

    objects = TestViewManager()

    def save(self):
        # the underlying relation is a SQL view; writes are impossible
        raise NotImplementedError('TestView is read-only')

    def delete(self):
        # the underlying relation is a SQL view; writes are impossible
        raise NotImplementedError('TestView is read-only')

    @classmethod
    def query_objects(cls, filter_data, initial_query=None,
                      apply_presentation=True):
        """Query the view, first grafting on any requested joins
        (see TestViewManager.get_query_set_with_joins)."""
        if initial_query is None:
            initial_query = cls.objects.get_query_set_with_joins(filter_data)
        return super(TestView, cls).query_objects(
            filter_data, initial_query=initial_query,
            apply_presentation=apply_presentation)

    class Meta:
        # Maps onto the pre-existing SQL view; Django must not manage it.
        db_table = 'tko_test_view_2'
        managed = False
| yangdongsheng/autotest | frontend/tko/models.py | Python | gpl-2.0 | 30,170 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image embedding ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets.inception_v3 import inception_v3_base
slim = tf.contrib.slim
def inception_v3(images,
                 trainable=True,
                 is_training=True,
                 weight_decay=0.00004,
                 stddev=0.1,
                 dropout_keep_prob=0.8,
                 use_batch_norm=True,
                 batch_norm_params=None,
                 add_summaries=True,
                 scope="InceptionV3"):
    """Builds an Inception V3 subgraph for image embeddings.

    Args:
        images: A float32 Tensor of shape [batch, height, width, channels].
        trainable: Whether the inception submodel should be trainable or not.
        is_training: Boolean indicating training mode or not.
        weight_decay: Coefficient for weight regularization.
        stddev: The standard deviation of the trunctated normal weight
            initializer.
        dropout_keep_prob: Dropout keep probability.
        use_batch_norm: Whether to use batch normalization.
        batch_norm_params: Parameters for batch normalization. See
            tf.contrib.layers.batch_norm for details.
        add_summaries: Whether to add activation summaries.
        scope: Optional Variable scope.

    Returns:
        A flattened float32 Tensor of image embeddings, one row per input
        image (the pooled, dropped-out output of the truncated Inception
        tower).  Note: the previous docstring claimed this returned the
        end_points dict, which it does not.
    """
    # Only consider the inception model to be in training mode if it's
    # trainable.
    is_inception_model_training = trainable and is_training

    if use_batch_norm:
        # Default parameters for batch normalization.
        if not batch_norm_params:
            batch_norm_params = {
                "is_training": is_inception_model_training,
                "trainable": trainable,
                # Decay for the moving averages.
                "decay": 0.9997,
                # Epsilon to prevent 0s in variance.
                "epsilon": 0.001,
                # Collection containing the moving mean and moving variance.
                "variables_collections": {
                    "beta": None,
                    "gamma": None,
                    "moving_mean": ["moving_vars"],
                    "moving_variance": ["moving_vars"],
                }
            }
    else:
        batch_norm_params = None

    # Regularize weights only when the submodel is actually being trained.
    if trainable:
        weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    else:
        weights_regularizer = None

    with tf.variable_scope(scope, "InceptionV3", [images]) as scope:
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_regularizer=weights_regularizer,
                trainable=trainable):
            with slim.arg_scope(
                    [slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params):
                # NOTE(review): the base network is cut off at its very first
                # layer ('Conv2d_1a_3x3') — confirm this truncation is
                # intentional rather than a leftover from an experiment.
                net, end_points = inception_v3_base(
                    images, final_endpoint='Conv2d_1a_3x3', scope=scope)
                with tf.variable_scope("logits"):
                    shape = net.get_shape()
                    # Global average pool over the remaining spatial dims,
                    # then dropout and flatten to a 2-D embedding matrix.
                    net = slim.avg_pool2d(net, shape[1:3], padding="VALID",
                                          scope="pool")
                    net = slim.dropout(
                        net,
                        keep_prob=dropout_keep_prob,
                        is_training=is_inception_model_training,
                        scope="dropout")
                    net = slim.flatten(net, scope="flatten")

    # Add summaries.
    if add_summaries:
        for v in end_points.values():
            tf.contrib.layers.summaries.summarize_activation(v)

    return net
| jthurst3/MemeCaptcha | models_cnn_lstm/im2txt/im2txt/ops/image_embedding.py | Python | mit | 4,173 |
# -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 - 2014 Odoo Canada. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import (
hr_job,
hr_contribution_category,
hr_job_contribution,
)
| dufresnedavid/canada | l10n_ca_hr_payroll_job_position/__init__.py | Python | agpl-3.0 | 1,028 |
# event/base.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base implementation classes.
The public-facing ``Events`` serves as the base class for an event interface;
its public attributes represent different kinds of events. These attributes
are mirrored onto a ``_Dispatch`` class, which serves as a container for
collections of listener functions. These collections are represented both
at the class level of a particular ``_Dispatch`` class as well as within
instances of ``_Dispatch``.
"""
from __future__ import absolute_import
from .. import util
from .attr import _JoinedDispatchDescriptor, \
_EmptyListener, _DispatchDescriptor
# Registry mapping each event name to the list of Events classes that
# declare it; populated by _create_dispatcher_class and pruned by
# _remove_dispatcher.
_registrars = util.defaultdict(list)
def _is_event_name(name):
return not name.startswith('_') and name != 'dispatch'
class _UnpickleDispatch(object):
"""Serializable callable that re-generates an instance of
:class:`_Dispatch` given a particular :class:`.Events` subclass.
"""
def __call__(self, _parent_cls):
for cls in _parent_cls.__mro__:
if 'dispatch' in cls.__dict__:
return cls.__dict__['dispatch'].dispatch_cls(_parent_cls)
else:
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
    """Mirror the event listening definitions of an Events class with
    listener collections.
    Classes which define a "dispatch" member will return a
    non-instantiated :class:`._Dispatch` subclass when the member
    is accessed at the class level. When the "dispatch" member is
    accessed at the instance level of its owner, an instance
    of the :class:`._Dispatch` class is returned.
    A :class:`._Dispatch` class is generated for each :class:`.Events`
    class defined, by the :func:`._create_dispatcher_class` function.
    The original :class:`.Events` classes remain untouched.
    This decouples the construction of :class:`.Events` subclasses from
    the implementation used by the event internals, and allows
    inspecting tools like Sphinx to work in an unsurprising
    way against the public API.
    """
    _events = None
    """reference the :class:`.Events` class which this
    :class:`._Dispatch` is created for."""
    def __init__(self, _parent_cls):
        # The class this dispatch container belongs to; __reduce__ uses
        # it to re-create the instance on unpickle.
        self._parent_cls = _parent_cls
    @util.classproperty
    def _listen(cls):
        # Listener registration is delegated to the owning Events class.
        return cls._events._listen
    def _join(self, other):
        """Create a 'join' of this :class:`._Dispatch` and another.
        This new dispatcher will dispatch events to both
        :class:`._Dispatch` objects.
        """
        if '_joined_dispatch_cls' not in self.__class__.__dict__:
            # Build the joined class lazily, once, and cache it on this
            # class so subsequent joins reuse it.
            cls = type(
                "Joined%s" % self.__class__.__name__,
                (_JoinedDispatcher, self.__class__), {}
            )
            for ls in _event_descriptors(self):
                setattr(cls, ls.name, _JoinedDispatchDescriptor(ls.name))
            self.__class__._joined_dispatch_cls = cls
        return self._joined_dispatch_cls(self, other)
    def __reduce__(self):
        # Pickle support: _UnpickleDispatch rebuilds this instance from
        # the parent class alone.
        return _UnpickleDispatch(), (self._parent_cls, )
    def _update(self, other, only_propagate=True):
        """Populate from the listeners in another :class:`_Dispatch`
        object."""
        for ls in _event_descriptors(other):
            if isinstance(ls, _EmptyListener):
                # No listeners to copy from an empty placeholder.
                continue
            getattr(self, ls.name).\
                for_modify(self)._update(ls, only_propagate=only_propagate)
    @util.hybridmethod
    def _clear(self):
        # Empty every event listener collection on this dispatch.
        # NOTE(review): util.hybridmethod presumably allows invocation on
        # both the class and an instance — confirm against util.
        for attr in dir(self):
            if _is_event_name(attr):
                getattr(self, attr).for_modify(self).clear()
def _event_descriptors(target):
    """Collect the event attribute values defined on *target*."""
    descriptors = []
    for attr_name in dir(target):
        if _is_event_name(attr_name):
            descriptors.append(getattr(target, attr_name))
    return descriptors
class _EventMeta(type):
    """Intercept new Event subclasses and create
    associated _Dispatch classes."""
    def __init__(cls, classname, bases, dict_):
        # Build the companion _Dispatch subclass as part of the Events
        # class construction, then finish normal type initialization.
        _create_dispatcher_class(cls, classname, bases, dict_)
        return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
    """Create a :class:`._Dispatch` class corresponding to an
    :class:`.Events` class."""
    # there's all kinds of ways to do this,
    # i.e. make a Dispatch class that shares the '_listen' method
    # of the Event class, this is the straight monkeypatch.
    dispatch_base = getattr(cls, 'dispatch', _Dispatch)
    dispatch_cls = type("%sDispatch" % classname,
                        (dispatch_base, ), {})
    cls._set_dispatch(cls, dispatch_cls)
    # Mirror each event method declared on the Events class onto the new
    # dispatch class, and register the Events class under that event name.
    for k in dict_:
        if _is_event_name(k):
            setattr(dispatch_cls, k, _DispatchDescriptor(cls, dict_[k]))
            _registrars[k].append(cls)
    # When the Events class names a target type, attach a dispatcher
    # descriptor to it so targets expose a 'dispatch' attribute.
    if getattr(cls, '_dispatch_target', None):
        cls._dispatch_target.dispatch = dispatcher(cls)
def _remove_dispatcher(cls):
    """Unregister *cls* from every event name it was registered under,
    dropping registry entries that become empty."""
    for event_name in dir(cls):
        if not _is_event_name(event_name):
            continue
        _registrars[event_name].remove(cls)
        if not _registrars[event_name]:
            del _registrars[event_name]
class Events(util.with_metaclass(_EventMeta, object)):
    """Define event listening functions for a particular target type."""
    @staticmethod
    def _set_dispatch(cls, dispatch_cls):
        # this allows an Events subclass to define additional utility
        # methods made available to the target via
        # "self.dispatch._events.<utilitymethod>"
        # @staticmethod to allow easy "super" calls while in a metaclass
        # constructor.
        cls.dispatch = dispatch_cls
        dispatch_cls._events = cls
    @classmethod
    def _accept_with(cls, target):
        # Mapper, ClassManager, Session override this to
        # also accept classes, scoped_sessions, sessionmakers, etc.
        #
        # Accept the target when its 'dispatch' attribute is an instance
        # of our dispatch class, or is the dispatch class itself (or a
        # subclass of it); return None to signal "not ours".
        if hasattr(target, 'dispatch') and (
            isinstance(target.dispatch, cls.dispatch) or
            isinstance(target.dispatch, type) and
            issubclass(target.dispatch, cls.dispatch)
        ):
            return target
        else:
            return None
    @classmethod
    def _listen(cls, event_key, propagate=False, insert=False, named=False):
        # Default listener registration: delegate to the event key.
        event_key.base_listen(propagate=propagate, insert=insert, named=named)
    @classmethod
    def _remove(cls, event_key):
        event_key.remove()
    @classmethod
    def _clear(cls):
        # Remove all listeners registered at the class level.
        cls.dispatch._clear()
class _JoinedDispatcher(object):
"""Represent a connection between two _Dispatch objects."""
def __init__(self, local, parent):
self.local = local
self.parent = parent
self._parent_cls = local._parent_cls
class dispatcher(object):
    """Descriptor placed on target classes to expose event dispatch.

    Accessed at the class level, yields the :class:`._Dispatch` class
    itself; accessed on an instance, creates a new :class:`._Dispatch`
    instance, memoizes it in the instance ``__dict__`` under the key
    ``'dispatch'``, and returns it.
    """

    def __init__(self, events):
        self.events = events
        self.dispatch_cls = events.dispatch

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access: hand back the dispatch class itself.
            return self.dispatch_cls
        # Instance-level access: build and cache a per-instance dispatch.
        instance_dispatch = self.dispatch_cls(cls)
        obj.__dict__['dispatch'] = instance_dispatch
        return instance_dispatch
| adamwwt/chvac | venv/lib/python2.7/site-packages/sqlalchemy/event/base.py | Python | mit | 7,248 |
# -*- coding: utf-8 -*-
from ..internal.misc import json, set_cookie
from ..internal.MultiAccount import MultiAccount
class SimplyPremiumCom(MultiAccount):
    __name__ = "SimplyPremiumCom"
    __type__ = "account"
    __version__ = "0.15"
    __status__ = "testing"
    __config__ = [("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
                  ("mh_list", "str", "Hoster list (comma separated)", ""),
                  ("mh_interval", "int", "Reload interval in hours", 12)]
    __description__ = """Simply-Premium.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("EvolutionClip", "evolutionclip@live.de")]
    def grab_hosters(self, user, password, data):
        """Fetch the list of hoster regexes currently online from the
        simply-premium.com hosts API."""
        json_data = self.load("http://www.simply-premium.com/api/hosts.php",
                              get={'format': "json",
                                   'online': 1})
        json_data = json.loads(json_data)
        host_list = [element['regex'] for element in json_data['result']]
        return host_list
    def grab_info(self, user, password, data):
        """Query the user API for account status.

        Returns a dict with 'premium' (bool), 'validuntil' (float epoch,
        -1 when the API reports no end time) and 'trafficleft' (float,
        None when the API reports no remaining traffic).
        """
        premium = False
        validuntil = -1
        trafficleft = None
        json_data = self.load('http://www.simply-premium.com/api/user.php?format=json')
        self.log_debug("JSON data: %s" % json_data)
        json_data = json.loads(json_data)
        if 'vip' in json_data['result'] and json_data['result']['vip']:
            premium = True
        if 'timeend' in json_data['result'] and json_data['result']['timeend']:
            validuntil = float(json_data['result']['timeend'])
        if 'remain_traffic' in json_data['result'] and json_data['result']['remain_traffic']:
            trafficleft = float(json_data['result']['remain_traffic'])
        return {'premium': premium, 'validuntil': validuntil,
                'trafficleft': trafficleft}
    def signin(self, user, password, data):
        """Log in to simply-premium.com, using API-key-only login when no
        password is given; the login is considered failed when the
        response page contains no 'logout' control."""
        set_cookie(self.req.cj, "simply-premium.com", "lang", "EN")
        html = self.load("https://www.simply-premium.com/login.php",
                         post={'key': user} if not password else {'login_name': user, 'login_pass': password})
        if 'logout' not in html:
            self.fail_login()
| igel-kun/pyload | module/plugins/accounts/SimplyPremiumCom.py | Python | gpl-3.0 | 2,214 |
from __future__ import print_function
from typing import cast, Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Text
from confirmation.models import Confirmation, create_confirmation_link
from django.conf import settings
from django.template import loader
from django.utils.timezone import now as timezone_now
from zerver.decorator import statsd_increment
from zerver.lib.send_email import send_future_email, \
send_email_from_dict, FromAddress
from zerver.lib.queue import queue_json_publish
from zerver.models import (
Recipient,
ScheduledEmail,
UserMessage,
Stream,
get_display_recipient,
UserProfile,
get_user,
get_user_profile_by_id,
receives_offline_notifications,
get_context_for_message,
Message,
Realm,
)
import datetime
from email.utils import formataddr
import re
import subprocess
import ujson
from six.moves import urllib
from collections import defaultdict
def one_click_unsubscribe_link(user_profile, email_type):
    # type: (UserProfile, str) -> str
    """Build an unsubscribe URL for *email_type* that a logged-out user
    can visit, with no login required."""
    realm_host = user_profile.realm.host
    return create_confirmation_link(
        user_profile, realm_host, Confirmation.UNSUBSCRIBE,
        url_args={'email_type': email_type})
def hash_util_encode(string):
    # type: (Text) -> Text
    """Encode *string* the same way the frontend's
    hash_util.encodeHashComponent does: percent-encode everything
    (including "/", which quote() would otherwise keep), then swap "."
    into "%2E" and "%" into "." so the result is fragment-safe."""
    quoted = urllib.parse.quote(string.encode("utf-8"), safe=b"")
    return quoted.replace(".", "%2E").replace("%", ".")
def pm_narrow_url(realm, participants):
    # type: (Realm, List[Text]) -> Text
    """Absolute URL narrowing to the PM thread with *participants*.

    Sorts *participants* in place so the resulting URL does not depend
    on the input ordering.
    """
    participants.sort()
    encoded = hash_util_encode(",".join(participants))
    return u"%s/#narrow/pm-with/%s" % (realm.uri, encoded)
def stream_narrow_url(realm, stream):
    # type: (Realm, Text) -> Text
    """Absolute URL narrowing to all messages on *stream*."""
    return u"%s/#narrow/stream/%s" % (realm.uri, hash_util_encode(stream))
def topic_narrow_url(realm, stream, topic):
    # type: (Realm, Text, Text) -> Text
    """Absolute URL narrowing to a single *topic* within *stream*."""
    return u"%s/#narrow/stream/%s/topic/%s" % (
        realm.uri, hash_util_encode(stream), hash_util_encode(topic))
def build_message_list(user_profile, messages):
    # type: (UserProfile, List[Message]) -> List[Dict[str, Any]]
    """
    Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface
    """
    messages_to_render = [] # type: List[Dict[str, Any]]
    def sender_string(message):
        # type: (Message) -> Text
        # Only stream and huddle messages display a sender name; 1:1 PMs
        # leave it blank.
        if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
            return message.sender.full_name
        else:
            return ''
    def relative_to_full_url(content):
        # type: (Text) -> Text
        # URLs for uploaded content are of the form
        # "/user_uploads/abc.png". Make them full paths.
        #
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage.
        content = re.sub(
            r"/user_uploads/(\S*)",
            user_profile.realm.uri + r"/user_uploads/\1", content)
        # Our proxying user-uploaded images seems to break inline images in HTML
        # emails, so scrub the image but leave the link.
        content = re.sub(
            r"<img src=(\S+)/user_uploads/(\S+)>", "", content)
        # URLs for emoji are of the form
        # "static/generated/emoji/images/emoji/snowflake.png".
        content = re.sub(
            r"/static/generated/emoji/images/emoji/",
            user_profile.realm.uri + r"/static/generated/emoji/images/emoji/",
            content)
        # Realm emoji should use absolute URLs when referenced in missed-message emails.
        content = re.sub(
            r"/user_avatars/(\d+)/emoji/",
            user_profile.realm.uri + r"/user_avatars/\1/emoji/", content)
        # Stream links need to be converted from relative to absolute. They
        # have href values in the form of "/#narrow/stream/...".
        content = re.sub(
            r"/#narrow/stream/",
            user_profile.realm.uri + r"/#narrow/stream/",
            content)
        return content
    def fix_plaintext_image_urls(content):
        # type: (Text) -> Text
        # Replace image URLs in plaintext content of the form
        # [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
    def fix_emoji_sizes(html):
        # type: (Text) -> Text
        # Email clients don't apply our CSS, so pin emoji height inline.
        return html.replace(' class="emoji"', ' height="20px"')
    def build_message_payload(message):
        # type: (Message) -> Dict[str, Text]
        # Produce both a plaintext and an HTML rendering of one message.
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        plain = relative_to_full_url(plain)
        assert message.rendered_content is not None
        html = message.rendered_content
        html = relative_to_full_url(html)
        html = fix_emoji_sizes(html)
        return {'plain': plain, 'html': html}
    def build_sender_payload(message):
        # type: (Message) -> Dict[str, Any]
        # Start a per-sender block seeded with this one message.
        sender = sender_string(message)
        return {'sender': sender,
                'content': [build_message_payload(message)]}
    def message_header(user_profile, message):
        # type: (UserProfile, Message) -> Dict[str, Any]
        # Header identifying the conversation (PM partner, huddle
        # members, or stream > topic), in plaintext and HTML forms.
        disp_recipient = get_display_recipient(message.recipient)
        if message.recipient.type == Recipient.PERSONAL:
            header = u"You and %s" % (message.sender.full_name,)
            html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        elif message.recipient.type == Recipient.HUDDLE:
            assert not isinstance(disp_recipient, Text)
            other_recipients = [r['full_name'] for r in disp_recipient
                                if r['email'] != user_profile.email]
            header = u"You and %s" % (", ".join(other_recipients),)
            html_link = pm_narrow_url(user_profile.realm, [r["email"] for r in disp_recipient
                                                           if r["email"] != user_profile.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        else:
            assert isinstance(disp_recipient, Text)
            header = u"%s > %s" % (disp_recipient, message.topic_name())
            stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
            # NOTE(review): this branch mixes message.topic_name() (above)
            # and message.subject (below) — confirm both resolve to the
            # same value.
            topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
            header_html = u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
                stream_link, disp_recipient, topic_link, message.subject)
        return {"plain": header,
                "html": header_html,
                "stream_message": message.recipient.type_name() == "stream"}
    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                   "plain":"header",
    #                   "html":"htmlheader"
    #                 }
    #       "senders":[
    #                   {
    #                       "sender":"sender_name",
    #                       "content":[
    #                                   {
    #                                       "plain":"content",
    #                                       "html":"htmlcontent"
    #                                   }
    #                                   {
    #                                       "plain":"content",
    #                                       "html":"htmlcontent"
    #                                   }
    #                                 ]
    #                   }
    #                 ]
    #       },
    # ]
    messages.sort(key=lambda message: message.pub_date)
    for message in messages:
        header = message_header(user_profile, message)
        # If we want to collapse into the previous recipient block
        if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
            sender = sender_string(message)
            sender_block = messages_to_render[-1]['senders']
            # Same message sender, collapse again
            if sender_block[-1]['sender'] == sender:
                sender_block[-1]['content'].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {'header': header,
                               'senders': [build_sender_payload(message)]}
            messages_to_render.append(recipient_block)
    return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
    # type: (UserProfile, List[Message], int) -> None
    """
    Send a reminder email to a user if she's missed some PMs by being offline.
    The email will have its reply to address set to a limited used email
    address that will send a zulip message to the correct recipient. This
    allows the user to respond to missed PMs, huddles, and @-mentions directly
    from the email.
    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of Message objects to remind about they should
    all have the same recipient and subject
    """
    from zerver.context_processors import common_context
    # Disabled missedmessage emails internally
    if not user_profile.enable_offline_email_notifications:
        return
    # Enforce the documented precondition: one (recipient, subject) pair.
    recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
    if len(recipients) != 1:
        raise ValueError(
            'All missed_messages must have the same recipient and subject %r' %
            recipients
        )
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    context = common_context(user_profile)
    context.update({
        'name': user_profile.full_name,
        'messages': build_message_list(user_profile, missed_messages),
        'message_count': message_count,
        'mention': missed_messages[0].recipient.type == Recipient.STREAM,
        'unsubscribe_link': unsubscribe_link,
    })
    # If this setting (email mirroring integration) is enabled, only then
    # can users reply to email to send message to Zulip. Thus, one must
    # ensure to display warning in the template.
    if settings.EMAIL_GATEWAY_PATTERN:
        context.update({
            'reply_warning': False,
            'reply_to_zulip': True,
        })
    else:
        context.update({
            'reply_warning': True,
            'reply_to_zulip': False,
        })
    from zerver.lib.email_mirror import create_missed_message_address
    reply_to_address = create_missed_message_address(user_profile, missed_messages[0])
    if reply_to_address == FromAddress.NOREPLY:
        reply_to_name = None
    else:
        reply_to_name = "Zulip"
    senders = list(set(m.sender for m in missed_messages))
    if (missed_messages[0].recipient.type == Recipient.HUDDLE):
        display_recipient = get_display_recipient(missed_messages[0].recipient)
        # Make sure that this is a list of strings, not a string.
        assert not isinstance(display_recipient, Text)
        other_recipients = [r['full_name'] for r in display_recipient
                            if r['id'] != user_profile.id]
        context.update({'group_pm': True})
        # Produce a human-readable list of the other huddle members,
        # truncating to "X, Y, and N others" beyond three participants.
        if len(other_recipients) == 2:
            huddle_display_name = u"%s" % (" and ".join(other_recipients))
            context.update({'huddle_display_name': huddle_display_name})
        elif len(other_recipients) == 3:
            huddle_display_name = u"%s, %s, and %s" % (other_recipients[0], other_recipients[1], other_recipients[2])
            context.update({'huddle_display_name': huddle_display_name})
        else:
            huddle_display_name = u"%s, and %s others" % (', '.join(other_recipients[:2]), len(other_recipients) - 2)
            context.update({'huddle_display_name': huddle_display_name})
    elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
        context.update({'private_message': True})
    else:
        # Keep only the senders who actually mentioned the user
        #
        # TODO: When we add wildcard mentions that send emails, add
        # them to the filter here.
        senders = list(set(m.sender for m in missed_messages if
                           UserMessage.objects.filter(message=m, user_profile=user_profile,
                                                      flags=UserMessage.flags.mentioned).exists()))
        context.update({'at_mention': True})
    context.update({
        'sender_str': ", ".join(sender.full_name for sender in senders),
        'realm_str': user_profile.realm.name,
    })
    from_name = "Zulip missed messages" # type: Text
    from_address = FromAddress.NOREPLY
    if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
        # If this setting is enabled, you can reply to the Zulip
        # missed message emails directly back to the original sender.
        # However, one must ensure the Zulip server is in the SPF
        # record for the domain, or there will be spam/deliverability
        # problems.
        sender = senders[0]
        from_name, from_address = (sender.full_name, sender.email)
        context.update({
            'reply_warning': False,
            'reply_to_zulip': False,
        })
    email_dict = {
        'template_prefix': 'zerver/emails/missed_message',
        'to_user_id': user_profile.id,
        'from_name': from_name,
        'from_address': from_address,
        'reply_to_email': formataddr((reply_to_name, reply_to_address)),
        'context': context}
    # Hand off to the email-sender queue rather than sending inline.
    queue_json_publish("missedmessage_email_senders", email_dict, send_email_from_dict)
    # Record when we last reminded this user.
    user_profile.last_reminder = timezone_now()
    user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
    # type: (int, Iterable[Dict[str, Any]]) -> None
    """Group a user's missed-message events by (recipient, topic) and send
    one reminder email per group, skipping users who don't receive
    offline notifications."""
    message_ids = [event.get('message_id') for event in missed_email_events]
    user_profile = get_user_profile_by_id(user_profile_id)
    if not receives_offline_notifications(user_profile):
        return
    # Restrict to messages that are still unread for this user.
    messages = Message.objects.filter(usermessage__user_profile_id=user_profile,
                                      id__in=message_ids,
                                      usermessage__flags=~UserMessage.flags.read)
    # Cancel missed-message emails for deleted messages
    messages = [um for um in messages if um.content != "(deleted)"]
    if not messages:
        return
    messages_by_recipient_subject = defaultdict(list) # type: Dict[Tuple[int, Text], List[Message]]
    for msg in messages:
        messages_by_recipient_subject[(msg.recipient_id, msg.topic_name())].append(msg)
    # Count before adding context messages, so the email reports only
    # the genuinely missed messages.
    message_count_by_recipient_subject = {
        recipient_subject: len(msgs)
        for recipient_subject, msgs in messages_by_recipient_subject.items()
    }
    for msg_list in messages_by_recipient_subject.values():
        # For stream messages, include surrounding context around the
        # earliest missed message.
        msg = min(msg_list, key=lambda msg: msg.pub_date)
        if msg.recipient.type == Recipient.STREAM:
            msg_list.extend(get_context_for_message(msg))
    # Send an email per recipient subject pair
    for recipient_subject, msg_list in messages_by_recipient_subject.items():
        # De-duplicate by id (context messages may overlap the missed ones).
        unique_messages = {m.id: m for m in msg_list}
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_recipient_subject[recipient_subject],
        )
def clear_scheduled_emails(user_id, email_type=None):
    # type: (int, Optional[int]) -> None
    """Delete a user's pending ScheduledEmail rows, optionally restricted
    to one email type."""
    pending = ScheduledEmail.objects.filter(user_id=user_id)
    if email_type is None:
        pending.delete()
    else:
        pending.filter(type=email_type).delete()
def log_digest_event(msg):
    # type: (Text) -> None
    """Append a line to the digest log file configured by settings."""
    import logging
    # NOTE: logging.basicConfig only has an effect the first time it runs
    # in a process; if the root logger was already configured elsewhere,
    # this call is a no-op and the message goes to the existing handlers.
    logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
    logging.info(msg)
def enqueue_welcome_emails(user_id):
    # type: (int) -> None
    """Schedule the day-1 and day-2 onboarding ("followup") emails for a
    newly registered user.

    The sender falls back to the support address unless the deployment
    configures settings.WELCOME_EMAIL_SENDER.
    """
    from zerver.context_processors import common_context
    if settings.WELCOME_EMAIL_SENDER is not None:
        # line break to avoid triggering lint rule
        from_name = settings.WELCOME_EMAIL_SENDER['name']
        from_address = settings.WELCOME_EMAIL_SENDER['email']
    else:
        from_name = None
        from_address = FromAddress.SUPPORT
    user_profile = get_user_profile_by_id(user_id)
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
    context = common_context(user_profile)
    context.update({
        'unsubscribe_link': unsubscribe_link,
        # Bug fix: this path previously contained a stray '%s' that was
        # never interpolated, so the emailed URL contained a literal "%s".
        'organization_setup_advice_link':
            user_profile.realm.uri + '/help/getting-your-organization-started-with-zulip',
        'is_realm_admin': user_profile.is_realm_admin,
    })
    send_future_email(
        "zerver/emails/followup_day1", to_user_id=user_id, from_name=from_name,
        from_address=from_address, context=context, delay=datetime.timedelta(hours=1))
    send_future_email(
        "zerver/emails/followup_day2", to_user_id=user_id, from_name=from_name,
        from_address=from_address, context=context, delay=datetime.timedelta(days=1))
def convert_html_to_markdown(html):
    # type: (Text) -> Text
    """Convert an HTML fragment to markdown by piping it through the
    html2text tool.

    Raises OSError when neither variant of the tool is installed.
    """
    # On Linux, the tool installs as html2markdown, and there's a command called
    # html2text that does something totally different. On OSX, the tool installs
    # as html2text.
    commands = ["html2markdown", "html2text"]
    for command in commands:
        try:
            # A body width of 0 means do not try to wrap the text for us.
            p = subprocess.Popen(
                [command, "--body-width=0"], stdout=subprocess.PIPE,
                stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
            break
        except OSError:
            continue
    else:
        # Bug fix: previously, when no command was available, execution
        # fell through and crashed with an opaque NameError on `p` below.
        raise OSError("Could not find html2markdown or html2text on PATH")
    markdown = p.communicate(input=html.encode('utf-8'))[0].decode('utf-8').strip()
    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form ``, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `` into
    # `[image.png](http://foo.com/image.png)`.
    return re.sub(u"!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)",
                  u"[\\2](\\1/\\2)", markdown)
| vaidap/zulip | zerver/lib/notifications.py | Python | apache-2.0 | 18,740 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'FormDefinition.redirect_delay'
        # Nullable, so existing rows need no default value backfill.
        db.add_column(u'djangocms_forms_formdefinition', 'redirect_delay',
                      self.gf('django.db.models.fields.PositiveIntegerField')(blank=True, null=True))
def backwards(self, orm):
# Deleting field 'FormDefinition.redirect_delay'
db.delete_column(u'djangocms_forms_formdefinition', 'redirect_delay')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'djangocms_forms.form': {
'Meta': {'object_name': 'Form'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'djangocms_forms.formdefinition': {
'Meta': {'object_name': 'FormDefinition', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email_to': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email_uploaded_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'external_redirect': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'form_template': ('django.db.models.fields.CharField', [], {'default': "'djangocms_forms/form_template/default.html'", 'max_length': '150', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'page_redirect': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'plugin_reference': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plugin'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['djangocms_forms.Form']"}),
'post_submit_msg': ('django.db.models.fields.TextField', [], {'default': "u'Thank You'", 'blank': 'True'}),
'save_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'spam_protection': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'submit_btn_txt': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '100'}),
'success_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'redirect_delay': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'}),
},
u'djangocms_forms.formfield': {
'Meta': {'ordering': "('position',)", 'object_name': 'FormField'},
'choice_values': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '100'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['djangocms_forms.FormDefinition']"}),
'help_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'djangocms_forms.formsubmission': {
'Meta': {'ordering': "('-creation_date',)", 'object_name': 'FormSubmission'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_data': ('jsonfield.fields.JSONField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True', 'blank': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['djangocms_forms.Form']"}),
'referrer': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['djangocms_forms']
| mishbahr/djangocms-forms | djangocms_forms/south_migrations/0003_auto_add_redirect_delay.py | Python | bsd-3-clause | 14,498 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.