| column | dtype | observed range |
|---|---|---|
| max_stars_repo_path | string | length 4 to 286 |
| max_stars_repo_name | string | length 5 to 119 |
| max_stars_count | int64 | 0 to 191k |
| id | string | length 1 to 7 |
| content | string | length 6 to 1.03M |
| content_cleaned | string | length 6 to 1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03 to 1 |
| comments | string | length 0 to 556k |
| edu_score | float64 | 0.32 to 5.03 |
| edu_int_score | int64 | 0 to 5 |
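The columns above describe a per-file code corpus: repository path, name and star count, the raw and cleaned file contents, the detected natural language of the comments with its confidence, the extracted comments themselves, and two educational-value scores. As a hedged illustration of how such a table is typically consumed (the dataset identifier below is a placeholder, not the real path of this corpus), it could be loaded and filtered with the Hugging Face `datasets` library:

```python
# Sketch only: "user/code-edu-corpus" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("user/code-edu-corpus", split="train")

# Keep rows whose comments are confidently English and reasonably educational,
# mirroring the language_score / edu_int_score columns listed above.
subset = ds.filter(
    lambda row: row["language"] == "en"
    and row["language_score"] >= 0.5
    and row["edu_int_score"] >= 2
)
print(len(subset), subset[0]["max_stars_repo_path"])
```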

tools/autotest.py | zhongxinghong/Botzone-Tank2 | 11 | 6633051
# -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-05-04 02:40:01
# @Last Modified by: Administrator
# @Last Modified time: 2019-05-16 17:31:20
"""
Automated testing tool
----------------------
Runs the bot over a specific dataset
so that any bugs can be caught and fixed promptly ...
Uses the same shell (wrapper) as the simulator
"""
import sys
sys.path.append("../")
from core import const as game_const
import os
import re
import time
import json
import subprocess
import multiprocessing
from _lib.utils import json_load
from _lib.simulator.const import BLUE_INPUT_JSON_FILENAME, RED_INPUT_JSON_FILENAME, DATASET_DIR
from _lib.simulator.utils import cut_by_turn
from _lib.simulator.stream import SimulatorConsoleOutputStream, SimulatorTextInputStream
from _lib.autotest.const import CONFIG_JSON_FILE
try:
config = json_load(CONFIG_JSON_FILE)
except json.JSONDecodeError as e:  # malformed config file
raise e
## Environment variable settings ##
game_const.DEBUG_MODE = config["environment"]["debug"]
game_const.LONG_RUNNING_MODE = config["environment"]["long_running"]
DATASET = config["dataset"]
CUT_OFF_LINE = "-" * 60
def main():
from main import main as run_game
inputJSONFiles = []
for matchID in DATASET:
blueInputJSONFile = os.path.join(DATASET_DIR, matchID, BLUE_INPUT_JSON_FILENAME)
redInputJSONFile = os.path.join(DATASET_DIR, matchID, RED_INPUT_JSON_FILENAME)
if os.path.exists(blueInputJSONFile):
inputJSONFiles.append(blueInputJSONFile)
if os.path.exists(redInputJSONFile):
inputJSONFiles.append(redInputJSONFile)
inputJSONFiles = [ os.path.abspath(file) for file in inputJSONFiles ] # to abspath
parentConnection, childrenConnection = multiprocessing.Pipe()
for file in inputJSONFiles:
try:
wholeInputJSON = json_load(file)
except json.JSONDecodeError as e:
print("[Error] failed to load %s" % file)
continue
print("Case %s" % file)
print(CUT_OFF_LINE)
data = None
globaldata = None
totalTurn = len(wholeInputJSON["responses"])
for turn in range(1, totalTurn+2):
inputJSON = cut_by_turn(wholeInputJSON, turn)
if data is not None:
inputJSON["data"] = data
if globaldata is not None:
inputJSON["globaldata"] = globaldata
istream = SimulatorTextInputStream(json.dumps(inputJSON))
ostream = SimulatorConsoleOutputStream(connection=childrenConnection, hide_data=True)
p = multiprocessing.Process( target=run_game, args=(istream, ostream) )
p.daemon = True
p.start()
output = parentConnection.recv()
p.join()
if p.exitcode != 0:
break
outputJSON = json.loads(output)
data = outputJSON.get("data")
globaldata = outputJSON.get("globaldata")
if data is not None:
inputJSON["data"] = data
print('')
if __name__ == '__main__':
    main()
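For reference, the script above reads only three values from CONFIG_JSON_FILE: two environment flags and a list of match IDs to replay. A hypothetical configuration, inferred from the dictionary lookups in the code rather than taken from the repository, would look like this:

```python
# Hypothetical shape of json_load(CONFIG_JSON_FILE); the match IDs are
# placeholders, not real Botzone match identifiers.
config = {
    "environment": {
        "debug": True,          # copied into game_const.DEBUG_MODE
        "long_running": False,  # copied into game_const.LONG_RUNNING_MODE
    },
    "dataset": [
        "<match-id-1>",  # each ID names a folder under DATASET_DIR that holds
        "<match-id-2>",  # the blue/red input JSON files for that match
    ],
}
```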

src/OrganMatching/views.py | ShikharJ/Organ-Exchange | 2 | 6633052
from django.http import HttpResponse
from django.shortcuts import render, redirect
from OrganMatching.misc import *
from OrganMatching.algo import *
blood_groups = ["A", "B", "AB", "O"]
rhesus_factors = ["+", "-"]
reports = ["Positive", "Negative"]
def index(request):
try:
del request.session['user']
except:
pass
return render(request, "OrganMatching/login.html")
def admin(request):
try:
return render(request, "OrganMatching/admin.html")
except:
return render(request, "OrganMatching/notadmin.html")
def submit(request):
if request.method == 'GET':
return render(request, "OrganMatching/lost.html")
if request.method == 'POST':
username = request.POST.get("Username")
password = <PASSWORD>("Password")
if username == "" or password == "":
return render(request, "OrganMatching/lost.html", {"Username": username, "Error": "Both fields must be filled!"})
ADMIN = "Shikhar"
############### UNCOMMENT FOR NO AUTHENTICATION ################
user_id = username
auth = True
################################################################
if auth:
if username == ADMIN:
request.session['user'] = "admin"
return redirect("admin")
else:
patient = get_content(user_id)
donors = get_donors()
if not donors:
return render(request, "OrganMatching/login.html", {"Username": username, "Error": "donors.csv has not been uploaded by the admin."})
return render(request, "OrganMatching/index.html", {"Username": username, "User_ID": user_id, "patient": patient, "donors": donors, "blood_groups": blood_groups, "rhesus_factors": rhesus_factors, "reports": reports, "range":range(len(patient) - 6), "orgreq":patient[6:]})
else:
return render(request, "OrganMatching/login.html", {"Username": username, "Error": "Your credentials are incorrect!"})
def resultview(request):
try:
final_list, donor_list = gale_shapley("static/donors.csv", "static/patients.csv")
return render(request, "OrganMatching/result.html", {"final_list": final_list})
except:
return render(request, "OrganMatching/notadmin.html")
def resultcsv(request):
try:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="results.csv"'
final_list, donor_list = gale_shapley("static/donors.csv", "static/patients.csv")
writer = csv.writer(response)
writer.writerows(final_list)
return response
except:
return render(request, "OrganMatching/notadmin.html")
def upload(request):
if request.method == 'GET':
return render(request, "OrganMatching/lost.html")
if request.method == 'POST':
if len(request.FILES) != 2:
return render(request, "OrganMatching/admin.html", {"Error": "Choose Both Files!"})
improvise(request.FILES['file1'], request.FILES['file2'])
return render(request, "OrganMatching/uploaded.html")
def saved(request):
if request.method == 'GET':
return render(request, "OrganMatching/lost.html")
if request.method == 'POST':
post_data = request.POST
error = is_correct(post_data)
if error == "None":
edit_csv(post_data)
warn = ""
if int(post_data.get("Age")) < 18:
warn = "Your age is below the legally permitted age, but the form was submitted!"
return render(request, "OrganMatching/saved.html", {"Warn": warn})
else:
user_id = post_data.get("User_ID")
username = post_data.get("Username")
patient = [user_id, post_data.get("Name"), post_data.get("Blood_Report"), post_data.get("Age"), post_data.get("Blood_Group"), post_data.get("Rhesus_Factor")]
for i in range(len(post_data) - 8):
patient.append(post_data.get("Organ_Requirement" + str(i + 1)))
donors = get_donors()
return render(request, "OrganMatching/index.html", {"Username": username, "User_ID": user_id, "patient": patient, "donors": donors, "blood_groups": blood_groups, "rhesus_factors": rhesus_factors, "reports": reports, "range":range(len(patient) - 6), "orgreq":patient[6:], "Error": error})

django_airavata/wagtailapps/base/models.py | vivekshresta/airavata-django-portal | 0 | 6633053
from __future__ import unicode_literals
import os
from django.db import models
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface
)
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.snippets.models import register_snippet
from .blocks import BaseStreamBlock, CssStreamBlock, Nav
@register_snippet
class Announcements(models.Model):
"""
This provides editable text for the site announcements. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
announcement_text = models.CharField(
max_length=255,
help_text='Provide an announcement text',
default='Announcement Text'
)
announcement_link = models.CharField(
max_length=255,
help_text='Give a redirect link for announcement',
default='Announcement Link'
)
panels = [
FieldPanel('announcement_text'),
FieldPanel('announcement_link'),
]
def __str__(self):
return "Announcement"
class Meta:
verbose_name_plural = 'Announcement'
@register_snippet
class NavExtra(models.Model):
"""
This provides editable text for the site extra navbar which comes below
the main navbar. Again it uses the decorator `register_snippet` to allow
it to be accessible via the admin. It is made accessible on the template
via a template tag defined in base/templatetags/navigation_tags.py
"""
nav = StreamField([
('nav', Nav(max_num=1)),
])
panels = [
StreamFieldPanel('nav'),
]
def __str__(self):
return "Nav extra"
class Meta:
verbose_name_plural = 'Nav extra'
@register_snippet
class CustomCss(models.Model):
"""
Custom CSS
"""
css = StreamField(
CssStreamBlock(),
verbose_name="CSS block",
blank=True,
null=True,
help_text="Write custom css and give comments as necessary",
default="")
panels = [
StreamFieldPanel('css'),
]
def __str__(self):
return "Custom Css"
class Meta:
verbose_name_plural = 'Custom CSS'
@register_snippet
class FooterText(models.Model):
"""
This provides editable text for the site footer. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
footer = StreamField(
BaseStreamBlock(),
verbose_name="Footer content block",
blank=True,
null=True)
panels = [
StreamFieldPanel('footer'),
]
def __str__(self):
return "Footer"
class Meta:
verbose_name_plural = 'Footer'
@register_snippet
class Navbar(models.Model):
"""
This provides editable text for the site header title. Again it uses the decorator
`register_snippet` to allow it to be accessible via the admin. It is made
accessible on the template via a template tag defined in base/templatetags/
navigation_tags.py
"""
logo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Brand Logo'
)
logo_redirect_link = models.CharField(
max_length=255,
help_text='Provide a redirection link for the logo or logo text Eg. (https://www.google.com/)',
null=True,
blank=True,
default='#',
)
boolean_choices = (
("yes", "Yes"),
("no", "No")
)
logo_with_text = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want to display the text next to the logo",
default="no")
logo_width = models.IntegerField(
help_text='Provide a width for the logo',
null=True,
blank=True,
        default=144,
)
logo_height = models.IntegerField(
help_text='Provide a height for the logo',
null=True,
blank=True,
        default=43
)
logo_text = models.CharField(
max_length=255,
help_text='Give a title text as an alternative to logo. Eg.(SEAGRID)',
null=True,
blank=True,
)
logo_text_color = models.CharField(
max_length=100,
help_text='Give a color for logo text if you have a logo text Eg.(#FFFFFF)',
null=True,
blank=True,
)
logo_text_size = models.IntegerField(
help_text='Give a text size as number of pixels Eg.(30)',
null=True,
blank=True,
)
panels = [
ImageChooserPanel('logo'),
FieldPanel('logo_redirect_link'),
FieldPanel('logo_width'),
FieldPanel('logo_height'),
FieldPanel('logo_text'),
FieldPanel('logo_with_text'),
FieldPanel('logo_text_size'),
FieldPanel('logo_text_color'),
]
def __str__(self):
return "Navbar"
class Meta:
verbose_name_plural = 'Navbar'
@register_snippet
class CustomHeaderLinks(models.Model):
"""
    This provides support for custom links inside the header. Otherwise, header links are generated dynamically when a new page is created. Sub-links are restricted to 4 per link.
"""
header_link_text = models.CharField(
max_length=25,
help_text='Give a Link text',
)
header_link = models.CharField(
max_length=255,
help_text='Provide a redirect Link',
null=True,
blank=True,
)
header_sub_link_text1 = models.CharField(
max_length=25,
help_text='Give a Sub Link 1 text',
null=True,
blank=True,
)
header_sub_link_text2 = models.CharField(
max_length=25,
help_text='Give a Sub Link 2 text',
null=True,
blank=True,
)
header_sub_link_text3 = models.CharField(
max_length=25,
help_text='Give a Sub Link 3 text',
null=True,
blank=True,
)
header_sub_link_text4 = models.CharField(
max_length=25,
help_text='Give a Sub Link 4 text',
null=True,
blank=True,
)
header_sub_link1 = models.CharField(
max_length=255,
help_text='Provide a redirect Link for sublink 1',
null=True,
blank=True,
)
header_sub_link2 = models.CharField(
max_length=255,
help_text='Provide a redirect Link for sublink 2',
null=True,
blank=True,
)
header_sub_link3 = models.CharField(
max_length=255,
help_text='Provide a redirect Link for sublink 3',
null=True,
blank=True,
)
header_sub_link4 = models.CharField(
max_length=255,
help_text='Provide a redirect Link for sublink 4',
null=True,
blank=True,
)
body = models.CharField(
max_length=255,
help_text='Give a title text',
null=True,
blank=True,
)
panels = [
FieldPanel('header_link_text'),
FieldPanel('header_link'),
MultiFieldPanel([
MultiFieldPanel([
FieldPanel('header_sub_link_text1'),
FieldPanel('header_sub_link1'),
]),
MultiFieldPanel([
FieldPanel('header_sub_link_text2'),
FieldPanel('header_sub_link2'),
]),
MultiFieldPanel([
FieldPanel('header_sub_link_text3'),
FieldPanel('header_sub_link3'),
]),
MultiFieldPanel([
FieldPanel('header_sub_link_text4'),
FieldPanel('header_sub_link4'),
])
], heading="Sub Links section", classname="collapsible"),
]
def __str__(self):
return "Header Custom Links"
class Meta:
verbose_name_plural = 'Header Custom Links'
@register_snippet
class GatewayIcon(models.Model):
"""
Image icon displayed in the header for logged in users.
"""
icon = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Choose Gateway Icon with dimensions 70x70'
)
background_color = models.CharField(
max_length=10,
default="#EEEEEE",
help_text='Background color for icon (e.g. #FFFFFF)',
)
panels = [
ImageChooserPanel('icon'),
FieldPanel('background_color'),
]
def __str__(self):
return "Gateway Icon"
class Meta:
verbose_name_plural = 'Gateway Icon'
@register_snippet
class GatewayTitle(models.Model):
"""
Title displayed in the header for logged in users.
"""
title_text = models.CharField(
max_length=100,
help_text='Title to display to logged in users.',
)
panels = [
FieldPanel('title_text'),
]
def __str__(self):
return "Gateway Title: {}".format(self.title_text)
class Meta:
verbose_name_plural = 'Gateway Title'
@register_snippet
class ExtraWebResources(ClusterableModel):
"""
Links to CSS and JavaScript to be included in all pages.
"""
panels = [
InlinePanel('css_links', label="CSS Links"),
InlinePanel('js_links', label="JS Links"),
]
def __str__(self):
try:
return "Extra Web Resources: {}".format(", ".join(
[os.path.basename(l.url) for l in self.css_links.all()] +
[os.path.basename(l.url) for l in self.js_links.all()]))
except Exception:
return "Extra Web Resources"
class Meta:
verbose_name_plural = 'Extra Web Resources'
class CssLink(Orderable):
url = models.CharField(
max_length=255,
help_text='URL of CSS stylesheet.'
)
panels = [
FieldPanel('url'),
]
extra_web_resources = ParentalKey(ExtraWebResources,
on_delete=models.CASCADE,
related_name="css_links")
class Meta:
verbose_name = 'CSS Link'
class JsLink(Orderable):
url = models.CharField(
max_length=255,
help_text='URL of JavaScript script.'
)
panels = [
FieldPanel('url'),
]
extra_web_resources = ParentalKey(ExtraWebResources,
on_delete=models.CASCADE,
related_name="js_links")
class Meta:
verbose_name = 'JS Link'
class HomePage(Page):
"""
The Home Page. This looks slightly more complicated than it is. You can
see if you visit your site and edit the homepage that it is split between
a:
- Hero area
- Body area
- A promotional area
- Moveable featured site sections
"""
# Hero section of HomePage
image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Homepage image'
)
hero_text = models.CharField(
max_length=255,
help_text='Write an introduction for the bakery',
null=True,
blank=True,
)
hero_cta = models.CharField(
verbose_name='Hero CTA',
max_length=255,
help_text='Text to display on Call to Action',
null=True,
blank=True,
)
hero_cta_link = models.ForeignKey(
'wagtailcore.Page',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
verbose_name='Hero CTA link',
help_text='Choose a page to link to for the Call to Action'
)
# Body section of the HomePage
body = StreamField(
BaseStreamBlock(),
verbose_name="Home content block",
blank=True,
null=True)
# Promo section of the HomePage
site_logo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Site Logo'
)
features_text = RichTextField(
null=True,
blank=True,
help_text='Write some feature description'
)
feature_logo_1 = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Feature Logo 1'
)
feature_1_title = models.CharField(
max_length=255,
help_text='Feature Title 1'
)
feature_1_text = RichTextField(
null=True,
blank=True,
help_text='Write some feature 1 text description'
)
feature_logo_2 = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Feature Logo 2'
)
feature_2_title = models.CharField(
max_length=255,
help_text='Feature Title 2'
)
feature_2_text = RichTextField(
null=True,
blank=True,
help_text='Write some feature 2 text description'
)
feature_logo_3 = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Feature Logo 3'
)
feature_3_title = models.CharField(
max_length=255,
help_text='Feature Title 3'
)
feature_3_text = RichTextField(
null=True,
blank=True,
help_text='Write some feature 3 text description'
)
feature_logo_4 = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Feature Logo 4'
)
feature_4_title = models.CharField(
max_length=255,
help_text='Feature Title 4'
)
feature_4_text = RichTextField(
null=True,
blank=True,
help_text='Write some feature 4 text description'
)
custom_body_message = RichTextField(
null=True,
blank=True,
help_text='Write some custom body message!'
)
banner_image = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Choose Banner Image'
)
boolean_choices = (
("yes", "Yes"),
("no", "No")
)
show_navbar = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want to display the navbar on home page and no if you don't want to.",
        default="yes")
show_nav_extra = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the secondary navbar to show on home page or no if you don't want to",
        default="yes")
show_footer = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the Footer to show on home page or no if you don't want to",
default="yes")
content_panels = Page.content_panels + [
MultiFieldPanel([
ImageChooserPanel('image'),
FieldPanel('hero_text', classname="full"),
MultiFieldPanel([
FieldPanel('hero_cta'),
PageChooserPanel('hero_cta_link'),
])
], heading="Hero section"),
StreamFieldPanel('body'),
MultiFieldPanel([
ImageChooserPanel('site_logo'),
FieldPanel('features_text'),
MultiFieldPanel([
ImageChooserPanel('feature_logo_1'),
FieldPanel('feature_1_title'),
FieldPanel('feature_1_text'),
]),
MultiFieldPanel([
ImageChooserPanel('feature_logo_2'),
FieldPanel('feature_2_title'),
FieldPanel('feature_2_text'),
]),
MultiFieldPanel([
ImageChooserPanel('feature_logo_3'),
FieldPanel('feature_3_title'),
FieldPanel('feature_3_text'),
]),
MultiFieldPanel([
ImageChooserPanel('feature_logo_4'),
FieldPanel('feature_4_title'),
FieldPanel('feature_4_text'),
])
], heading="Feature section", classname="collapsible"),
FieldPanel('custom_body_message'),
ImageChooserPanel('banner_image')
]
customization_panels = [
FieldPanel('show_navbar'),
FieldPanel('show_nav_extra'),
FieldPanel('show_footer')
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='Content'),
ObjectList(customization_panels, heading='Customization'),
ObjectList(Page.promote_panels, heading='Promote'),
ObjectList(Page.settings_panels, heading='Settings',
classname="settings"),
])
def __str__(self):
return self.title
class Row(models.Model):
body = StreamField(
BaseStreamBlock(), verbose_name="Row Content", blank=True, null=True
)
panels = [
StreamFieldPanel('body'),
]
class Meta:
abstract = True
class RowBlankPageRelation(Orderable, Row):
page = ParentalKey('django_airavata_wagtail_base.BlankPage',
on_delete=models.CASCADE, related_name='row')
class BlankPage(Page):
"""
The Blank Template Page. You can see if you visit your site and edit the blank page. Used to create free form content
"""
boolean_choices = (
("yes", "Yes"),
("no", "No")
)
show_navbar = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want to display the navbar on home page and no if you don't want to.",
default="yes")
show_nav_extra = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the secondary navbar to show on home page or no if you don't want to",
default="yes")
show_footer = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the Footer to show on home page or no if you don't want to",
default="yes")
show_announcements = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the Announcements to show up on home page or no if you don't want to",
default="yes")
content_panels = Page.content_panels + [
InlinePanel("row", label="row")
]
customization_panels = [
FieldPanel('show_navbar'),
FieldPanel('show_nav_extra'),
FieldPanel('show_footer'),
FieldPanel('show_announcements')
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='Content'),
ObjectList(customization_panels, heading='Customization'),
ObjectList(Page.promote_panels, heading='Promote'),
ObjectList(Page.settings_panels, heading='Settings',
classname="settings"),
])
def __str__(self):
return self.title
class RowCybergatewayHomePageRelation(Orderable, Row):
page = ParentalKey('django_airavata_wagtail_base.CybergatewayHomePage',
on_delete=models.CASCADE, related_name='row')
class CybergatewayHomePage(Page):
"""
The Cybergateway themed template Page
"""
# Hero section of HomePage
site_logo = models.ForeignKey(
'wagtailimages.Image',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
help_text='Site Logo Image'
)
site_link = models.CharField(
max_length=255,
default="#",
help_text='Give a site redirect link',
)
site_text = models.CharField(
max_length=50,
default="#",
help_text='Give a Site Name',
)
site_header = models.CharField(
max_length=70,
default="#",
help_text='Give a Site Header Name',
)
site_link1 = models.CharField(
max_length=70,
default="#",
help_text='Give a Site Nav Link [1]',
)
site_link_text1 = models.CharField(
max_length=70,
help_text='Give a Site Nav Link Text [1]',
)
site_link2 = models.CharField(
max_length=70,
default='#',
help_text='Give a Site Nav Link [2]',
)
site_link_text2 = models.CharField(
max_length=70,
help_text='Give a Site Nav Link Text [2]',
)
site_link3 = models.CharField(
max_length=70,
default="#",
help_text='Give a Site Nav Link [3]',
)
site_link_text3 = models.CharField(
max_length=70,
help_text='Give a Site Nav Link Text [3]',
)
contact = StreamField(
BaseStreamBlock(),
verbose_name="Contact Info Block",
blank=True,
null=True)
footer = StreamField(
BaseStreamBlock(),
verbose_name="Footer Content Block",
blank=True,
null=True)
boolean_choices = (
("yes", "Yes"),
("no", "No")
)
show_navbar = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want to display the navbar on home page and no if you don't want to.",
default="yes")
show_nav_extra = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the secondary navbar to show on home page or no if you don't want to",
default="yes")
show_footer = models.CharField(
choices=boolean_choices,
max_length=5,
help_text="Choose yes if you want the Footer to show on home page or no if you don't want to",
default="yes")
content_panels = Page.content_panels + [
MultiFieldPanel([
ImageChooserPanel('site_logo'),
FieldPanel('site_link'),
FieldPanel('site_text'),
FieldPanel('site_header'),
FieldPanel('site_link1'),
FieldPanel('site_link_text1'),
FieldPanel('site_link2'),
FieldPanel('site_link_text2'),
FieldPanel('site_link3'),
FieldPanel('site_link_text3'),
], heading="Navbar Section"),
InlinePanel("row", label="row"),
StreamFieldPanel('contact'),
StreamFieldPanel('footer'),
]
customization_panels = [
FieldPanel('show_navbar'),
FieldPanel('show_nav_extra'),
FieldPanel('show_footer'),
]
edit_handler = TabbedInterface([
ObjectList(content_panels, heading='Content'),
ObjectList(customization_panels, heading='Customization'),
ObjectList(Page.promote_panels, heading='Promote'),
ObjectList(Page.settings_panels, heading='Settings',
classname="settings"),
])
def __str__(self):
return self.title
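Several snippet docstrings above point to template tags in base/templatetags/navigation_tags.py, which is not part of this file. A hedged sketch of what such a tag could look like for the Announcements snippet follows; the tag name and template path are illustrative, and the import path is only inferred from the file's location in the repository:

```python
# Hypothetical excerpt of base/templatetags/navigation_tags.py.
from django import template

from django_airavata.wagtailapps.base.models import Announcements

register = template.Library()


@register.inclusion_tag("blocks/announcements.html", takes_context=True)
def announcements(context):
    # Announcements is a plain snippet model, so render the first row if any.
    return {
        "announcement": Announcements.objects.first(),
        "request": context["request"],
    }
```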

assignments/assignment2/model.py | kstepanovdev/dlcourse_ai | 0 | 6633054
import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
""" Neural network with two fully connected layers """
def __init__(self, n_input, n_output, hidden_layer_size, reg):
"""
Initializes the neural network
Arguments:
n_input, int - dimension of the model input
n_output, int - number of classes to predict
hidden_layer_size, int - number of neurons in the hidden layer
reg, float - L2 regularization strength
"""
self.reg = reg
input_layer = FullyConnectedLayer(n_input, hidden_layer_size)
relu_layer = ReLULayer()
hidden_layer = FullyConnectedLayer(hidden_layer_size, n_output)
self.layers = [input_layer, relu_layer, hidden_layer]
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients
on a batch of training examples
Arguments:
X, np array (batch_size, input_features) - input data
y, np array of int (batch_size) - classes
"""
# Before running forward and backward pass through the model,
# clear parameter gradients aggregated from the previous pass
# TODO Set parameter gradient to zeros
# Hint: using self.params() might be useful!
# raise Exception("Not implemented!")
for param, value in self.params().items():
value.grad = np.zeros_like(value.grad)
preds = X
for layer in self.layers:
preds = layer.forward(preds)
loss, grad = softmax_with_cross_entropy(preds, y)
for layer in reversed(self.layers):
grad = layer.backward(grad)
for param_name, param in self.params().items():
if 'W' in param_name:
l2_loss, l2_grad = l2_regularization(param.value, self.reg)
param.grad += l2_grad
loss += l2_loss
# TODO Compute loss and fill param gradients
# by running forward and backward passes through the model
# After that, implement l2 regularization on all params
# Hint: self.params() is useful again!
# raise Exception("Not implemented!")
return loss
def predict(self, X):
"""
Produces classifier predictions on the set
Arguments:
X, np array (test_samples, num_features)
Returns:
y_pred, np.array of int (test_samples)
"""
# TODO: Implement predict
# Hint: some of the code of the compute_loss_and_gradients
# can be reused
output = X
for layer in self.layers:
output = layer.forward(output)
pred = np.argmax(output, axis=1)
return pred
def params(self):
result = {}
for index, layer in enumerate(self.layers):
for param, value in layer.params().items():
result[str(index) + param] = value
return result
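A short usage sketch for the class above. It assumes the companion layers.py from the same assignment is importable and that each object returned by params() exposes .value and .grad arrays, as the gradient code implies; the input sizes and hyperparameters are arbitrary:

```python
# Illustrative single training step on random data; nothing here is real data.
import numpy as np

model = TwoLayerNet(n_input=32 * 32 * 3, n_output=10,
                    hidden_layer_size=128, reg=1e-4)

X_batch = np.random.randn(64, 32 * 32 * 3)       # 64 fake flattened images
y_batch = np.random.randint(0, 10, size=64)      # fake integer class labels

loss = model.compute_loss_and_gradients(X_batch, y_batch)

learning_rate = 1e-2
for param in model.params().values():            # plain SGD update
    param.value -= learning_rate * param.grad

accuracy = np.mean(model.predict(X_batch) == y_batch)
print(f"loss={loss:.3f}  accuracy={accuracy:.3f}")
```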
| <filename>assignments/assignment2/model.py
import numpy as np
from layers import FullyConnectedLayer, ReLULayer, softmax_with_cross_entropy, l2_regularization
class TwoLayerNet:
""" Neural network with two fully connected layers """
def __init__(self, n_input, n_output, hidden_layer_size, reg):
"""
Initializes the neural network
Arguments:
n_input, int - dimension of the model input
n_output, int - number of classes to predict
hidden_layer_size, int - number of neurons in the hidden layer
reg, float - L2 regularization strength
"""
self.reg = reg
input_layer = FullyConnectedLayer(n_input, hidden_layer_size)
relu_layer = ReLULayer()
hidden_layer = FullyConnectedLayer(hidden_layer_size, n_output)
self.layers = [input_layer, relu_layer, hidden_layer]
def compute_loss_and_gradients(self, X, y):
"""
Computes total loss and updates parameter gradients
on a batch of training examples
Arguments:
X, np array (batch_size, input_features) - input data
y, np array of int (batch_size) - classes
"""
# Before running forward and backward pass through the model,
# clear parameter gradients aggregated from the previous pass
# TODO Set parameter gradient to zeros
# Hint: using self.params() might be useful!
# raise Exception("Not implemented!")
for param, value in self.params().items():
value.grad = np.zeros_like(value.grad)
preds = X
for layer in self.layers:
preds = layer.forward(preds)
loss, grad = softmax_with_cross_entropy(preds, y)
for layer in reversed(self.layers):
grad = layer.backward(grad)
for param_name, param in self.params().items():
if 'W' in param_name:
l2_loss, l2_grad = l2_regularization(param.value, self.reg)
param.grad += l2_grad
loss += l2_loss
        # Loss, parameter gradients and L2 regularization on the weights are computed above.
return loss
def predict(self, X):
"""
Produces classifier predictions on the set
Arguments:
X, np array (test_samples, num_features)
Returns:
y_pred, np.array of int (test_samples)
"""
# TODO: Implement predict
# Hint: some of the code of the compute_loss_and_gradients
# can be reused
output = X
for layer in self.layers:
output = layer.forward(output)
pred = np.argmax(output, axis=1)
return pred
def params(self):
result = {}
for index, layer in enumerate(self.layers):
for param, value in layer.params().items():
result[str(index) + param] = value
return result
| en | 0.595795 | Neural network with two fully connected layers Initializes the neural network Arguments: n_input, int - dimension of the model input n_output, int - number of classes to predict hidden_layer_size, int - number of neurons in the hidden layer reg, float - L2 regularization strength Computes total loss and updates parameter gradients on a batch of training examples Arguments: X, np array (batch_size, input_features) - input data y, np array of int (batch_size) - classes # Before running forward and backward pass through the model, # clear parameter gradients aggregated from the previous pass # TODO Set parameter gradient to zeros # Hint: using self.params() might be useful! # raise Exception("Not implemented!") # TODO Compute loss and fill param gradients # by running forward and backward passes through the model # After that, implement l2 regularization on all params # Hint: self.params() is useful again! # raise Exception("Not implemented!") Produces classifier predictions on the set Arguments: X, np array (test_samples, num_features) Returns: y_pred, np.array of int (test_samples) # TODO: Implement predict # Hint: some of the code of the compute_loss_and_gradients # can be reused | 3.675115 | 4 |
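# Usage sketch for the TwoLayerNet entry above (not part of the original file). It assumes
# the assignment's layers.py is importable and uses hypothetical data shapes and learning rate.
import numpy as np
from model import TwoLayerNet  # assumes the file above is saved as model.py

X = np.random.randn(64, 3072)            # hypothetical batch of flattened images
y = np.random.randint(0, 10, size=64)    # hypothetical integer class labels
net = TwoLayerNet(n_input=3072, n_output=10, hidden_layer_size=100, reg=1e-4)
loss = net.compute_loss_and_gradients(X, y)
for param in net.params().values():      # one plain gradient-descent step on the accumulated grads
    param.value -= 1e-2 * param.grad
print(loss, net.predict(X)[:5])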
python/tetris/main.py | klenium/tetris | 2 | 6633055 | <gh_stars>1-10
from tetris.logic.TetrisGame import TetrisGame
from tetris.util.containers import Dimension
from tetris.view.GraphicGameFrame import GraphicGameFrame
def run_game(rows, cols, size, speed, controls):
grid_size = Dimension(cols, rows)
square_size = size
falling_speed = speed
frame = GraphicGameFrame(grid_size, square_size)
game = TetrisGame(grid_size, falling_speed)
game.board_state_change.on_change += frame.display_board
game.tetromino_state_change.on_change += frame.display_tetromino
game.game_over.on_change += frame.display_game_over
game.start()
frame.register_event_listeners(game, controls)
frame.show_window()
| from tetris.logic.TetrisGame import TetrisGame
from tetris.util.containers import Dimension
from tetris.view.GraphicGameFrame import GraphicGameFrame
def run_game(rows, cols, size, speed, controls):
grid_size = Dimension(cols, rows)
square_size = size
falling_speed = speed
frame = GraphicGameFrame(grid_size, square_size)
game = TetrisGame(grid_size, falling_speed)
game.board_state_change.on_change += frame.display_board
game.tetromino_state_change.on_change += frame.display_tetromino
game.game_over.on_change += frame.display_game_over
game.start()
frame.register_event_listeners(game, controls)
frame.show_window() | none | 1 | 2.825911 | 3 |
|
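# Hypothetical invocation of run_game from the tetris entry above, assuming the package root
# is on sys.path. The structure of `controls` is an assumption; the real key bindings are
# defined by GraphicGameFrame.register_event_listeners.
from tetris.main import run_game

controls = {"left": "A", "right": "D", "rotate": "W", "drop": "S"}  # placeholder mapping
run_game(rows=16, cols=10, size=30, speed=700, controls=controls)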
cri/intake.py | oldnick85/CResourceIntake | 0 | 6633056 | #!/usr/bin/env python3
import sys
import json
def main(args):
    rsrcs = CResources(args)
rsrcs.proc()
return 0
class CResources:
resources_path = ""
sources_path = ""
namespace = None
def __init__(self, argv):
        for arg in argv:
if (arg.find("--resources-path=") == 0):
self.resources_path = arg.replace("--resources-path=", "") + "/"
if (arg.find("--sources-path=") == 0):
self.sources_path = arg.replace("--sources-path=", "") + "/"
return
def proc(self):
with open(self.resources_path+"resources.json", "r") as resources_file:
resources_all = json.load(resources_file)
print("resources_all=%s\n" % str(resources_all))
self.namespace = resources_all["namespace"]
resources = resources_all["resources"]
[code_h, code_cpp] = self.proc_resources(resources)
if (self.namespace):
code_h = ("namespace %s {\n" % self.namespace) + code_h + "\n}"
code_cpp = "#include \"resources.h\"\n\n" + ("using namespace %s;\n\n" % self.namespace) + code_cpp
code_h = "#include <array>\n" + code_h
file_h = open(self.sources_path+"resources.h", 'w')
file_h.write(code_h)
file_h.close()
file_cpp = open(self.sources_path+"resources.cpp", 'w')
file_cpp.write(code_cpp)
file_cpp.close()
return
def proc_resources(self, resources):
code_h = ""
code_cpp = ""
for resource in resources:
[h, cpp] = self.proc_resource(resource)
code_h += h
code_cpp += cpp
return [code_h, code_cpp]
def proc_resource(self, resource):
code_h = ""
code_cpp = ""
fname = resource["file"]
varname = resource["var"]
f = open(self.resources_path+fname, 'rb')
data = f.read()
code_h += "\nextern std::array<uint8_t, %d> %s;\n" % (len(data), varname)
code_cpp += "std::array<uint8_t, %d> %s::%s = {\n" % (len(data), self.namespace, varname)
for d in data:
code_cpp += (" 0x%02X," % d)
code_cpp += "\n};\n\n"
f.close()
return [code_h, code_cpp]
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #!/usr/bin/env python3
import sys
import json
def main(args):
    rsrcs = CResources(args)
rsrcs.proc()
return 0
class CResources:
resources_path = ""
sources_path = ""
namespace = None
def __init__(self, argv):
        for arg in argv:
if (arg.find("--resources-path=") == 0):
self.resources_path = arg.replace("--resources-path=", "") + "/"
if (arg.find("--sources-path=") == 0):
self.sources_path = arg.replace("--sources-path=", "") + "/"
return
def proc(self):
with open(self.resources_path+"resources.json", "r") as resources_file:
resources_all = json.load(resources_file)
print("resources_all=%s\n" % str(resources_all))
self.namespace = resources_all["namespace"]
resources = resources_all["resources"]
[code_h, code_cpp] = self.proc_resources(resources)
if (self.namespace):
code_h = ("namespace %s {\n" % self.namespace) + code_h + "\n}"
code_cpp = "#include \"resources.h\"\n\n" + ("using namespace %s;\n\n" % self.namespace) + code_cpp
code_h = "#include <array>\n" + code_h
file_h = open(self.sources_path+"resources.h", 'w')
file_h.write(code_h)
file_h.close()
file_cpp = open(self.sources_path+"resources.cpp", 'w')
file_cpp.write(code_cpp)
file_cpp.close()
return
def proc_resources(self, resources):
code_h = ""
code_cpp = ""
for resource in resources:
[h, cpp] = self.proc_resource(resource)
code_h += h
code_cpp += cpp
return [code_h, code_cpp]
def proc_resource(self, resource):
code_h = ""
code_cpp = ""
fname = resource["file"]
varname = resource["var"]
f = open(self.resources_path+fname, 'rb')
data = f.read()
code_h += "\nextern std::array<uint8_t, %d> %s;\n" % (len(data), varname)
code_cpp += "std::array<uint8_t, %d> %s::%s = {\n" % (len(data), self.namespace, varname)
for d in data:
code_cpp += (" 0x%02X," % d)
code_cpp += "\n};\n\n"
f.close()
return [code_h, code_cpp]
if __name__ == '__main__':
sys.exit(main(sys.argv))
| fr | 0.221828 | #!/usr/bin/env python3 | 2.761117 | 3 |
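# Shape of the resources.json that the intake.py entry above reads, inferred directly from the
# code: a top-level "namespace" string plus a "resources" list of {"file", "var"} pairs.
# File names and the namespace below are illustrative only.
# {
#     "namespace": "rsrc",
#     "resources": [
#         {"file": "logo.png", "var": "logo_png"},
#         {"file": "shader.glsl", "var": "shader_glsl"}
#     ]
# }
# Example invocation (paths are hypothetical):
# python3 cri/intake.py --resources-path=resources --sources-path=src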
code_cracker (numbers).py | ThatGuyCalledJesse/brute_force_attack | 0 | 6633057 | <gh_stars>0
import random
import time
codePart1 = input('Enter the first part of your code: ')
codePart2 = input('Enter the second part of your code: ')
codePart3 = input('Enter the third part of your code: ')
codePart4 = input('Enter the fourth part of your code: ')
code = (codePart1, codePart2, codePart3, codePart4)
possible_code_parts = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')
while True:
possible_code1 = random.choice(possible_code_parts)
possible_code2 = random.choice(possible_code_parts)
possible_code3 = random.choice(possible_code_parts)
possible_code4 = random.choice(possible_code_parts)
possible_code = (possible_code1, possible_code2, possible_code3, possible_code4)
print(possible_code)
if possible_code == code:
print(f'Password cracked: {code}')
bye_bye = input('Enter something to quit: ')
        quit() | import random
import time
codePart1 = input('Enter the first part of your code: ')
codePart2 = input('Enter the second part of your code: ')
codePart3 = input('Enter the third part of your code: ')
codePart4 = input('Enter the fourth part of your code: ')
code = (codePart1, codePart2, codePart3, codePart4)
possible_code_parts = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0')
while True:
possible_code1 = random.choice(possible_code_parts)
possible_code2 = random.choice(possible_code_parts)
possible_code3 = random.choice(possible_code_parts)
possible_code4 = random.choice(possible_code_parts)
possible_code = (possible_code1, possible_code2, possible_code3, possible_code4)
print(possible_code)
if possible_code == code:
print(f'Password cracked: {code}')
bye_bye = input('Enter something to quit: ')
        quit() | none | 1 | 3.622958 | 4
|
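# A deterministic alternative to the random guessing above: enumerating all 10**4 candidates
# with itertools.product bounds the search at 10000 attempts, whereas sampling with replacement
# only finds the code after about 10000 attempts on average.
from itertools import product

def crack(code):
    for attempt in product('0123456789', repeat=4):
        if attempt == code:
            return attempt

print(crack(('1', '2', '3', '4')))  # same tuple-of-strings format as the script above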
tests/bindings/test_node_path.py | garyo/godot-python | 0 | 6633058 | import pytest
from godot.bindings import NodePath
class TestNodePath:
def test_equal(self):
v1 = NodePath("parent/child")
v2 = NodePath("parent/child")
assert v1 == v2
other = NodePath("parent/other_child")
assert not v1 == other # Force use of __eq__
@pytest.mark.parametrize(
"arg", [None, 0, "parent/child", NodePath("parent/other_child")]
)
def test_bad_equal(self, arg):
basis = NodePath("parent/child")
assert basis != arg
def test_repr(self):
v = NodePath("/root/leaf")
assert repr(v) == "<NodePath(path='/root/leaf')>"
@pytest.mark.parametrize("args", [(), (42,), (None,)])
def test_bad_build(self, args):
with pytest.raises(TypeError):
NodePath(*args)
@pytest.mark.parametrize(
"field,ret_type,params",
[
["get_name", str, (0,)],
["get_name_count", int, ()],
["get_concatenated_subnames", str, ()],
["get_subname", str, (0,)],
["get_subname_count", int, ()],
["is_absolute", bool, ()],
["is_empty", bool, ()],
],
ids=lambda x: x[0],
)
def test_methods(self, field, ret_type, params):
v = NodePath("/foo")
# Don't test methods' validity but bindings one
assert hasattr(v, field)
method = getattr(v, field)
assert callable(method)
ret = method(*params)
assert isinstance(ret, ret_type)
| import pytest
from godot.bindings import NodePath
class TestNodePath:
def test_equal(self):
v1 = NodePath("parent/child")
v2 = NodePath("parent/child")
assert v1 == v2
other = NodePath("parent/other_child")
assert not v1 == other # Force use of __eq__
@pytest.mark.parametrize(
"arg", [None, 0, "parent/child", NodePath("parent/other_child")]
)
def test_bad_equal(self, arg):
basis = NodePath("parent/child")
assert basis != arg
def test_repr(self):
v = NodePath("/root/leaf")
assert repr(v) == "<NodePath(path='/root/leaf')>"
@pytest.mark.parametrize("args", [(), (42,), (None,)])
def test_bad_build(self, args):
with pytest.raises(TypeError):
NodePath(*args)
@pytest.mark.parametrize(
"field,ret_type,params",
[
["get_name", str, (0,)],
["get_name_count", int, ()],
["get_concatenated_subnames", str, ()],
["get_subname", str, (0,)],
["get_subname_count", int, ()],
["is_absolute", bool, ()],
["is_empty", bool, ()],
],
ids=lambda x: x[0],
)
def test_methods(self, field, ret_type, params):
v = NodePath("/foo")
# Don't test methods' validity but bindings one
assert hasattr(v, field)
method = getattr(v, field)
assert callable(method)
ret = method(*params)
assert isinstance(ret, ret_type)
| en | 0.80935 | # Force use of __eq__ # Don't test methods' validity but bindings one | 2.555259 | 3 |
test.py | andy971022/CWGP | 7 | 6633059 | import matplotlib.pyplot as plt
import autograd.numpy as np
import seaborn as sns
from scipy import stats
from cwgp.cwgp import CWGP
from cwgp.grid_search import grid_search
import cwgp
print(cwgp)
np.random.seed(seed=32)
SIZE = 70
betas = np.random.exponential(scale=5, size=SIZE)
sns.distplot(betas)
plt.show()
compgp = CWGP(["sa", "box_cox", "box_cox"])
model = compgp.fit(betas, np.arange(SIZE, dtype="float"), verbose=True)
print(compgp.phi.res.x)
transformed_betas, d = compgp.phi.comp_phi(model.x, betas)
sns.distplot(transformed_betas)
plt.show()
plt.plot(np.arange(SIZE, dtype="float"), betas)
plt.show()
stats.probplot(transformed_betas, dist="norm", plot=plt)
plt.show()
plt.plot(np.arange(SIZE, dtype="float"), transformed_betas)
plt.show()
print(model.x)
inv_transformed_betas = compgp.phi.inv_comp_phi(model.x, transformed_betas)
fig, ax = plt.subplots(1, 2)
sns.distplot(inv_transformed_betas, ax=ax[0])
sns.distplot(betas, ax=ax[1])
plt.show()
def estimator(**kwargs):
if kwargs.get("cv", False):
y_train = kwargs["y_train"]
y_val = kwargs["y_val"]
x_train = kwargs["x_train"]
x_val = kwargs["x_val"]
cwgp_model = kwargs["cwgp_model"]
y_train, y_d = cwgp_model.phi.comp_phi(
cwgp_model.phi.res.x, y_train)
y_val, y_d = cwgp_model.phi.comp_phi(
cwgp_model.phi.res.x, y_val)
sns.distplot(y_train)
plt.show()
# stats.probplot(y_train, dist="norm", plot=plt)
sns.distplot(y_val)
# stats.probplot(y_val, dist="norm", plot=plt)
plt.show()
# second param is a place holder
# should give 9^3 combinations
# grid_search(
# estimator, betas, np.arange(SIZE, dtype="float"), {
# "c": 1, "transformations": [
# "sa"]}, test="hi")
grid_search(
estimator, np.arange(SIZE, dtype="float"), betas, {
"c": 4, "transformations": [
"box_cox", "sa", "sal"]}, test="hi", cv=True, n_splits=3, verbose=True)
| import matplotlib.pyplot as plt
import autograd.numpy as np
import seaborn as sns
from scipy import stats
from cwgp.cwgp import CWGP
from cwgp.grid_search import grid_search
import cwgp
print(cwgp)
np.random.seed(seed=32)
SIZE = 70
betas = np.random.exponential(scale=5, size=SIZE)
sns.distplot(betas)
plt.show()
compgp = CWGP(["sa", "box_cox", "box_cox"])
model = compgp.fit(betas, np.arange(SIZE, dtype="float"), verbose=True)
print(compgp.phi.res.x)
transformed_betas, d = compgp.phi.comp_phi(model.x, betas)
sns.distplot(transformed_betas)
plt.show()
plt.plot(np.arange(SIZE, dtype="float"), betas)
plt.show()
stats.probplot(transformed_betas, dist="norm", plot=plt)
plt.show()
plt.plot(np.arange(SIZE, dtype="float"), transformed_betas)
plt.show()
print(model.x)
inv_transformed_betas = compgp.phi.inv_comp_phi(model.x, transformed_betas)
fig, ax = plt.subplots(1, 2)
sns.distplot(inv_transformed_betas, ax=ax[0])
sns.distplot(betas, ax=ax[1])
plt.show()
def estimator(**kwargs):
if kwargs.get("cv", False):
y_train = kwargs["y_train"]
y_val = kwargs["y_val"]
x_train = kwargs["x_train"]
x_val = kwargs["x_val"]
cwgp_model = kwargs["cwgp_model"]
y_train, y_d = cwgp_model.phi.comp_phi(
cwgp_model.phi.res.x, y_train)
y_val, y_d = cwgp_model.phi.comp_phi(
cwgp_model.phi.res.x, y_val)
sns.distplot(y_train)
plt.show()
# stats.probplot(y_train, dist="norm", plot=plt)
sns.distplot(y_val)
# stats.probplot(y_val, dist="norm", plot=plt)
plt.show()
# second param is a place holder
# should give 9^3 combinations
# grid_search(
# estimator, betas, np.arange(SIZE, dtype="float"), {
# "c": 1, "transformations": [
# "sa"]}, test="hi")
grid_search(
estimator, np.arange(SIZE, dtype="float"), betas, {
"c": 4, "transformations": [
"box_cox", "sa", "sal"]}, test="hi", cv=True, n_splits=3, verbose=True)
| en | 0.426464 | # stats.probplot(y_train, dist="norm", plot=plt) # stats.probplot(y_val, dist="norm", plot=plt) # second param is a place holder # should give 9^3 combinations # grid_search( # estimator, betas, np.arange(SIZE, dtype="float"), { # "c": 1, "transformations": [ # "sa"]}, test="hi") | 2.386594 | 2 |
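# Round-trip check distilled from the CWGP test above. The call pattern (fit result exposing .x,
# comp_phi returning a (transformed, derivative) pair, inv_comp_phi returning an array) follows
# the usage shown above rather than documented API, so treat this as a sketch.
import numpy as np
from cwgp.cwgp import CWGP

y = np.random.exponential(scale=5, size=70)
t = np.arange(70, dtype="float")
compgp = CWGP(["sa", "box_cox"])
res = compgp.fit(y, t)
transformed, _ = compgp.phi.comp_phi(res.x, y)
recovered = compgp.phi.inv_comp_phi(res.x, transformed)
print(np.max(np.abs(recovered - y)))  # should be small if the transform round-trips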
epush_bot.py | yuiokjgft/pods | 16 | 6633060 | <filename>epush_bot.py
import telebot
import os
from config import *
from flask import Flask, request
server = Flask(__name__)
import importdir
importdir.do("features", globals())
@server.route('/'+ TOKEN, methods=['POST'])
def getMessage():
request_object = request.stream.read().decode("utf-8")
update_to_json = [telebot.types.Update.de_json(request_object)]
bot.process_new_updates(update_to_json)
return "got Message bro"
@server.route('/hook')
def webhook():
url=URL
bot.remove_webhook()
bot.set_webhook(url + TOKEN)
return f"Webhook set to {url}"
@server.route('/')
def thanks():
url=URL
return f"Thanks you've reach chukwudi bot {url}"
if DEBUG:
bot.remove_webhook()
bot.polling()
else:
if __name__ == "__main__":
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000))) | <filename>epush_bot.py
import telebot
import os
from config import *
from flask import Flask, request
server = Flask(__name__)
import importdir
importdir.do("features", globals())
@server.route('/'+ TOKEN, methods=['POST'])
def getMessage():
request_object = request.stream.read().decode("utf-8")
update_to_json = [telebot.types.Update.de_json(request_object)]
bot.process_new_updates(update_to_json)
return "got Message bro"
@server.route('/hook')
def webhook():
url=URL
bot.remove_webhook()
bot.set_webhook(url + TOKEN)
return f"Webhook set to {url}"
@server.route('/')
def thanks():
url=URL
return f"Thanks you've reach chukwudi bot {url}"
if DEBUG:
bot.remove_webhook()
bot.polling()
else:
if __name__ == "__main__":
server.run(host="0.0.0.0", port=int(os.environ.get('PORT', 5000))) | none | 1 | 2.388945 | 2 |
|
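# epush_bot.py above does `from config import *` and relies on TOKEN, URL, DEBUG and a ready
# `bot` instance. A hypothetical config.py satisfying those names could look like this;
# the values are placeholders and not taken from the original repository.
import os
import telebot

TOKEN = os.environ.get("TELEGRAM_TOKEN", "")   # placeholder: supply the real bot token
URL = "https://example-bot.herokuapp.com/"     # placeholder base URL; set_webhook(url + TOKEN) expects the trailing slash
DEBUG = False
bot = telebot.TeleBot(TOKEN)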
tests/test_08_config/test_filters.py | primal100/aionetworking | 0 | 6633061 | <reponame>primal100/aionetworking<filename>tests/test_08_config/test_filters.py
import logging
import operator
import pytest
from aionetworking.utils import Expression, in_
class TestExpression:
@pytest.mark.parametrize('attr,obj', (
[None, False],
['self', False],
['method', True]
))
def test_01_expression(self, attr, obj, json_rpc_login_request_object, json_rpc_logout_request_object):
expr = Expression(attr, operator.eq, 'login')
assert expr.op == operator.eq
assert expr.case_sensitive is False
assert expr.attr == attr
assert expr.value == 'login'
if obj:
assert expr(json_rpc_login_request_object) is True
assert expr(json_rpc_logout_request_object) is False
else:
assert expr('login') is True
assert expr('logout') is False
@pytest.mark.parametrize('expression,op,case_sensitive,attr,value,login,logout', (
['method = login', operator.eq, False, 'method', 'login', True, False],
['method i= Login', operator.eq, True, 'method', 'login', True, False],
['method = Login', operator.eq, False, 'method', 'Login', False, False],
['id = 1', operator.eq, False, 'id', '1', True, False],
['id > 1', operator.gt, False, 'id', '1', False, True],
['received', operator.eq, False, 'received', True, False, False],
['not received', operator.eq, False, 'received', False, True, True],
['method contains out', operator.contains, False, 'method', 'out', False, True],
['method contains log', operator.contains, False, 'method', 'log', True, True],
['method contains Out', operator.contains, False, 'method', 'Out', False, False],
['method icontains Out', operator.contains, True, 'method', 'out', False, True],
['method in logins', in_, False, 'method', 'logins', True, False],
['method in Logins', in_, False, 'method', 'Logins', False, False],
['method iin Logins', in_, True, 'method', 'logins', True, False],
))
def test_01_expression_from_string(self, json_rpc_login_request_object, json_rpc_logout_request_object,
expression: str, op, case_sensitive, attr, value, login, logout):
expr = Expression.from_string(expression)
assert expr.op.callable == op
assert expr.case_sensitive is case_sensitive
assert expr.attr == attr
assert expr.value == value
assert expr(json_rpc_login_request_object) is login
assert expr(json_rpc_logout_request_object) is logout
class TestPeerFilter:
def test_00_peer_filter_log_record_included(self, peer_filter, log_record):
assert peer_filter.filter(log_record) is True
def test_01_peer_filter_log_record_not_included(self, peer_filter, log_record_not_included):
assert peer_filter.filter(log_record_not_included) is False
def test_02_peer_filter_included(self, receiver_connection_logger, peer_filter, debug_logging, caplog):
logging.getLogger('receiver.connection').addFilter(peer_filter)
receiver_connection_logger.new_connection()
assert len(caplog.record_tuples) == 2
def test_03_peer_filter_not_included(self, receiver_connection_logger_wrong_peer, peer_filter, caplog):
logging.getLogger('receiver.connection').addFilter(peer_filter)
receiver_connection_logger_wrong_peer.new_connection()
assert len(caplog.record_tuples) == 0
class TestMessageFilter:
def test_00_msg_filter_log_record_included(self, message_filter, log_record_msg_object):
assert message_filter.filter(log_record_msg_object) is True
def test_01_msg_filter_log_record_not_included(self, message_filter, log_record_msg_object_not_included):
assert message_filter.filter(log_record_msg_object_not_included) is False
def test_02_msg_filter_included(self, receiver_connection_logger, message_filter, json_rpc_login_request_object,
debug_logging, caplog):
logging.getLogger('receiver.msg_received').addFilter(message_filter)
receiver_connection_logger.on_msg_decoded(json_rpc_login_request_object)
assert len(caplog.record_tuples) == 1
def test_03_msg_filter_not_included(self, receiver_connection_logger, message_filter,
json_rpc_logout_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg_received').addFilter(message_filter)
receiver_connection_logger.on_msg_decoded(json_rpc_logout_request_object)
assert len(caplog.record_tuples) == 0
def test_04_msg_logger_filter_included(self, receiver_connection_logger, message_filter,
json_rpc_login_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg').addFilter(message_filter)
json_rpc_login_request_object.logger.debug('Hello World')
assert len(caplog.record_tuples) == 1
def test_05_msg_logger_filter_not_included(self, receiver_connection_logger, message_filter,
json_rpc_logout_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg').addFilter(message_filter)
json_rpc_logout_request_object.logger.debug('Hello World')
assert len(caplog.record_tuples) == 0
| import logging
import operator
import pytest
from aionetworking.utils import Expression, in_
class TestExpression:
@pytest.mark.parametrize('attr,obj', (
[None, False],
['self', False],
['method', True]
))
def test_01_expression(self, attr, obj, json_rpc_login_request_object, json_rpc_logout_request_object):
expr = Expression(attr, operator.eq, 'login')
assert expr.op == operator.eq
assert expr.case_sensitive is False
assert expr.attr == attr
assert expr.value == 'login'
if obj:
assert expr(json_rpc_login_request_object) is True
assert expr(json_rpc_logout_request_object) is False
else:
assert expr('login') is True
assert expr('logout') is False
@pytest.mark.parametrize('expression,op,case_sensitive,attr,value,login,logout', (
['method = login', operator.eq, False, 'method', 'login', True, False],
['method i= Login', operator.eq, True, 'method', 'login', True, False],
['method = Login', operator.eq, False, 'method', 'Login', False, False],
['id = 1', operator.eq, False, 'id', '1', True, False],
['id > 1', operator.gt, False, 'id', '1', False, True],
['received', operator.eq, False, 'received', True, False, False],
['not received', operator.eq, False, 'received', False, True, True],
['method contains out', operator.contains, False, 'method', 'out', False, True],
['method contains log', operator.contains, False, 'method', 'log', True, True],
['method contains Out', operator.contains, False, 'method', 'Out', False, False],
['method icontains Out', operator.contains, True, 'method', 'out', False, True],
['method in logins', in_, False, 'method', 'logins', True, False],
['method in Logins', in_, False, 'method', 'Logins', False, False],
['method iin Logins', in_, True, 'method', 'logins', True, False],
))
def test_01_expression_from_string(self, json_rpc_login_request_object, json_rpc_logout_request_object,
expression: str, op, case_sensitive, attr, value, login, logout):
expr = Expression.from_string(expression)
assert expr.op.callable == op
assert expr.case_sensitive is case_sensitive
assert expr.attr == attr
assert expr.value == value
assert expr(json_rpc_login_request_object) is login
assert expr(json_rpc_logout_request_object) is logout
class TestPeerFilter:
def test_00_peer_filter_log_record_included(self, peer_filter, log_record):
assert peer_filter.filter(log_record) is True
def test_01_peer_filter_log_record_not_included(self, peer_filter, log_record_not_included):
assert peer_filter.filter(log_record_not_included) is False
def test_02_peer_filter_included(self, receiver_connection_logger, peer_filter, debug_logging, caplog):
logging.getLogger('receiver.connection').addFilter(peer_filter)
receiver_connection_logger.new_connection()
assert len(caplog.record_tuples) == 2
def test_03_peer_filter_not_included(self, receiver_connection_logger_wrong_peer, peer_filter, caplog):
logging.getLogger('receiver.connection').addFilter(peer_filter)
receiver_connection_logger_wrong_peer.new_connection()
assert len(caplog.record_tuples) == 0
class TestMessageFilter:
def test_00_msg_filter_log_record_included(self, message_filter, log_record_msg_object):
assert message_filter.filter(log_record_msg_object) is True
def test_01_msg_filter_log_record_not_included(self, message_filter, log_record_msg_object_not_included):
assert message_filter.filter(log_record_msg_object_not_included) is False
def test_02_msg_filter_included(self, receiver_connection_logger, message_filter, json_rpc_login_request_object,
debug_logging, caplog):
logging.getLogger('receiver.msg_received').addFilter(message_filter)
receiver_connection_logger.on_msg_decoded(json_rpc_login_request_object)
assert len(caplog.record_tuples) == 1
def test_03_msg_filter_not_included(self, receiver_connection_logger, message_filter,
json_rpc_logout_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg_received').addFilter(message_filter)
receiver_connection_logger.on_msg_decoded(json_rpc_logout_request_object)
assert len(caplog.record_tuples) == 0
def test_04_msg_logger_filter_included(self, receiver_connection_logger, message_filter,
json_rpc_login_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg').addFilter(message_filter)
json_rpc_login_request_object.logger.debug('Hello World')
assert len(caplog.record_tuples) == 1
def test_05_msg_logger_filter_not_included(self, receiver_connection_logger, message_filter,
json_rpc_logout_request_object, debug_logging, caplog):
logging.getLogger('receiver.msg').addFilter(message_filter)
json_rpc_logout_request_object.logger.debug('Hello World')
assert len(caplog.record_tuples) == 0 | none | 1 | 2.43136 | 2 |
|
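# Minimal sketch of the Expression DSL exercised by the test table above; it only relies on
# behaviour the parametrized cases already demonstrate (attribute lookup on the passed object).
from aionetworking.utils import Expression

class Msg:
    def __init__(self, method):
        self.method = method

expr = Expression.from_string('method icontains Out')
print(expr(Msg('login')), expr(Msg('logout')))  # False True, matching the parametrized case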
lightautoml/text/utils.py | Zhurik/LightAutoML | 0 | 6633062 | """Text utility script."""
import os
import random
from typing import Dict, List, Sequence
import numpy as np
import torch
from log_calls import record_history
from sklearn.utils.murmurhash import murmurhash3_32
_dtypes_mapping = {'label': 'float',
'cat': 'long',
'cont': 'float',
'weight': 'float',
'input_ids': 'long',
'attention_mask': 'long',
'token_type_ids': 'long',
'text': 'float', # embeddings
'length': 'long'}
@record_history(enabled=False)
def inv_sigmoid(x: np.ndarray) -> np.ndarray:
"""Inverse sigmoid transformation.
Args:
x: Input array.
Returns:
Transformed array.
"""
return np.log(x / (1 - x))
@record_history(enabled=False)
def inv_softmax(x: np.ndarray) -> np.ndarray:
"""Variant of inverse softmax transformation with zero constant term.
Args:
x: Input array.
Returns:
Transformed array.
"""
eps = 1e-7
x = np.abs(x)
arr = (x + eps) / (np.sum(x) + eps)
arr = np.log(arr)
return arr
@record_history(enabled=False)
def is_shuffle(stage: str) -> bool:
"""Whether shuffle input.
Args:
stage: Train, val, test.
Returns:
Bool value.
"""
is_sh = {'train': True, 'val': False, 'test': False}
return is_sh[stage]
@record_history(enabled=False)
def seed_everything(seed: int = 42, deterministic: bool = True):
"""Set random seed and cudnn params.
Args:
seed: Random state.
deterministic: cudnn backend.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
@record_history(enabled=False)
def parse_devices(dvs, is_dp: bool = False) -> tuple:
"""Parse devices and convert first to the torch device.
Args:
dvs: List, string with device ids or torch.device.
is_dp: Use data parallel - additionally returns device ids.
Returns:
First torch device and list of gpu ids.
"""
device = []
ids = []
if (not torch.cuda.is_available()) or (dvs is None):
return torch.device('cpu'), None
if not isinstance(dvs, (list, tuple)):
dvs = [dvs]
for _device in dvs:
if isinstance(_device, str):
if _device.startswith('cuda:'):
ids.append(int(_device.split('cuda:')[-1]))
elif _device == 'cuda':
ids.append(0)
elif _device == 'cpu':
return torch.device('cpu'), None
else:
ids.append(int(_device))
_device = torch.device(int(_device))
elif isinstance(_device, int):
ids.append(_device)
_device = torch.device('cuda:{}'.format(_device))
elif isinstance(_device, torch.device):
if _device.type == 'cpu':
return _device, None
else:
if _device.index is None:
ids.append(0)
else:
ids.append(_device.index)
else:
raise ValueError('Unknown device type: {}'.format(_device))
device.append(_device)
return device[0], ids if (len(device) > 1) and is_dp else None
@record_history(enabled=False)
def custom_collate(batch: List[np.ndarray]) -> torch.Tensor:
"""Puts each data field into a tensor with outer dimension batch size."""
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
else:
return torch.from_numpy(np.array(batch)).float()
@record_history(enabled=False)
def collate_dict(batch: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""custom_collate for dicts."""
keys = list(batch[0].keys())
transposed_data = list(map(list, zip(*[tuple([i[name] for name in i.keys()]) for i in batch])))
return {key: custom_collate(transposed_data[n]) for n, key in enumerate(keys)}
@record_history(enabled=False)
def single_text_hash(x: str) -> str:
"""Get text hash.
Args:
x: Text.
Returns:
String text hash.
"""
numhash = murmurhash3_32(x, seed=13)
texthash = str(numhash) if numhash > 0 else 'm' + str(abs(numhash))
return texthash
@record_history(enabled=False)
def get_textarr_hash(x: Sequence[str]) -> str:
"""Get hash of array with texts.
Args:
x: Text array.
Returns:
Hash of array.
"""
full_hash = single_text_hash(str(x))
n = 0
for text in x:
if text != '':
full_hash += '_' + single_text_hash(text)
n += 1
if n >= 3:
break
return full_hash
| """Text utility script."""
import os
import random
from typing import Dict, List, Sequence
import numpy as np
import torch
from log_calls import record_history
from sklearn.utils.murmurhash import murmurhash3_32
_dtypes_mapping = {'label': 'float',
'cat': 'long',
'cont': 'float',
'weight': 'float',
'input_ids': 'long',
'attention_mask': 'long',
'token_type_ids': 'long',
'text': 'float', # embeddings
'length': 'long'}
@record_history(enabled=False)
def inv_sigmoid(x: np.ndarray) -> np.ndarray:
"""Inverse sigmoid transformation.
Args:
x: Input array.
Returns:
Transformed array.
"""
return np.log(x / (1 - x))
@record_history(enabled=False)
def inv_softmax(x: np.ndarray) -> np.ndarray:
"""Variant of inverse softmax transformation with zero constant term.
Args:
x: Input array.
Returns:
Transformed array.
"""
eps = 1e-7
x = np.abs(x)
arr = (x + eps) / (np.sum(x) + eps)
arr = np.log(arr)
return arr
@record_history(enabled=False)
def is_shuffle(stage: str) -> bool:
"""Whether shuffle input.
Args:
stage: Train, val, test.
Returns:
Bool value.
"""
is_sh = {'train': True, 'val': False, 'test': False}
return is_sh[stage]
@record_history(enabled=False)
def seed_everything(seed: int = 42, deterministic: bool = True):
"""Set random seed and cudnn params.
Args:
seed: Random state.
deterministic: cudnn backend.
"""
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
@record_history(enabled=False)
def parse_devices(dvs, is_dp: bool = False) -> tuple:
"""Parse devices and convert first to the torch device.
Args:
dvs: List, string with device ids or torch.device.
is_dp: Use data parallel - additionally returns device ids.
Returns:
First torch device and list of gpu ids.
"""
device = []
ids = []
if (not torch.cuda.is_available()) or (dvs is None):
return torch.device('cpu'), None
if not isinstance(dvs, (list, tuple)):
dvs = [dvs]
for _device in dvs:
if isinstance(_device, str):
if _device.startswith('cuda:'):
ids.append(int(_device.split('cuda:')[-1]))
elif _device == 'cuda':
ids.append(0)
elif _device == 'cpu':
return torch.device('cpu'), None
else:
ids.append(int(_device))
_device = torch.device(int(_device))
elif isinstance(_device, int):
ids.append(_device)
_device = torch.device('cuda:{}'.format(_device))
elif isinstance(_device, torch.device):
if _device.type == 'cpu':
return _device, None
else:
if _device.index is None:
ids.append(0)
else:
ids.append(_device.index)
else:
raise ValueError('Unknown device type: {}'.format(_device))
device.append(_device)
return device[0], ids if (len(device) > 1) and is_dp else None
@record_history(enabled=False)
def custom_collate(batch: List[np.ndarray]) -> torch.Tensor:
"""Puts each data field into a tensor with outer dimension batch size."""
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
else:
return torch.from_numpy(np.array(batch)).float()
@record_history(enabled=False)
def collate_dict(batch: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""custom_collate for dicts."""
keys = list(batch[0].keys())
transposed_data = list(map(list, zip(*[tuple([i[name] for name in i.keys()]) for i in batch])))
return {key: custom_collate(transposed_data[n]) for n, key in enumerate(keys)}
@record_history(enabled=False)
def single_text_hash(x: str) -> str:
"""Get text hash.
Args:
x: Text.
Returns:
String text hash.
"""
numhash = murmurhash3_32(x, seed=13)
texthash = str(numhash) if numhash > 0 else 'm' + str(abs(numhash))
return texthash
@record_history(enabled=False)
def get_textarr_hash(x: Sequence[str]) -> str:
"""Get hash of array with texts.
Args:
x: Text array.
Returns:
Hash of array.
"""
full_hash = single_text_hash(str(x))
n = 0
for text in x:
if text != '':
full_hash += '_' + single_text_hash(text)
n += 1
if n >= 3:
break
return full_hash
| en | 0.681339 | Text utility script. # embeddings Inverse sigmoid transformation. Args: x: Input array. Returns: Transformed array. Variant of inverse softmax transformation with zero constant term. Args: x: Input array. Returns: Transformed array. Whether shuffle input. Args: stage: Train, val, test. Returns: Bool value. Set random seed and cudnn params. Args: seed: Random state. deterministic: cudnn backend. Parse devices and convert first to the torch device. Args: dvs: List, string with device ids or torch.device. is_dp: Use data parallel - additionally returns device ids. Returns: First torch device and list of gpu ids. Puts each data field into a tensor with outer dimension batch size. custom_collate for dicts. Get text hash. Args: x: Text. Returns: String text hash. Get hash of array with texts. Args: x: Text array. Returns: Hash of array. | 2.477824 | 2 |
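# A few illustrative calls against the helpers above, assuming the package layout implied by the
# path lightautoml/text/utils.py. The functions are pure, so the expected output follows directly
# from their code.
import numpy as np
from lightautoml.text.utils import inv_sigmoid, parse_devices, single_text_hash

p = np.array([0.25, 0.5, 0.9])
print(np.allclose(1 / (1 + np.exp(-inv_sigmoid(p))), p))  # True: inv_sigmoid inverts the sigmoid

print(parse_devices(None))        # (device('cpu'), None) when no devices are requested
print(single_text_hash("hello"))  # deterministic murmurhash-based string ('m' prefix for negative hashes)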
data/external/repositories/137656/blundercheck-master/combine/contest_20150213a/modeling/fit_linear_pgmodel.py | Keesiu/meta-kaggle | 0 | 6633063 | <filename>data/external/repositories/137656/blundercheck-master/combine/contest_20150213a/modeling/fit_linear_pgmodel.py
#!/usr/bin/env python
import sys, time
import numpy as np
import cPickle as pickle
from pandas import DataFrame
from pandas import read_pickle
from pandas import get_dummies
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from djeval import *
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
dummies = get_dummies(yy_df['opening_feature'])
# TODO save the moveelo_features along with yy_df
moveelo_features = [("moveelo_" + x) for x in ['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
new_depth_cols = ['mean_num_bestmoves', 'mean_num_bestmove_changes', 'mean_bestmove_depths_agreeing', 'mean_deepest_change', 'mean_deepest_change_ratio']
train = yy_df[yy_df.meanerror.notnull() & yy_df.elo.notnull()]
formula_rhs = "side + nmerror + gameoutcome + drawn_game + gamelength + meanecho"
formula_rhs = formula_rhs + " + opponent_nmerror + opponent_noblunders"
formula_rhs = formula_rhs + " + min_nmerror + early_lead"
formula_rhs = formula_rhs + " + q_error_one + q_error_two"
formula_rhs = formula_rhs + " + opponent_q_error_one"
formula_rhs = formula_rhs + " + mean_depth_clipped + mean_seldepth"
formula_rhs = formula_rhs + " + mean_depths_ar + mean_deepest_ar"
formula_rhs = formula_rhs + " + opponent_mean_depths_ar + opponent_mean_deepest_ar"
formula_rhs = formula_rhs + " + pct_sanemoves"
formula_rhs = formula_rhs + " + " + " + ".join(dummies.columns.values)
formula_rhs = formula_rhs + " + moveelo_weighted"
formula_rhs = formula_rhs + " + " + " + ".join(new_depth_cols)
# Never mind these, they didn't help much
#formula_rhs = formula_rhs + " + " + " + ".join(moveelo_features)
formula = "elo ~ " + formula_rhs
msg("Fitting!")
ols = sm.ols(formula=formula, data=train).fit()
print ols.summary()
msg("Making predictions for all playergames")
yy_df['ols_prediction'] = ols.predict(yy_df)
yy_df['ols_error'] = (yy_df['ols_prediction'] - yy_df['elo']).abs()
yy_df['training'] = yy_df['elo'].notnull()
insample_scores = yy_df.groupby('training')['ols_error'].agg({'mean' : np.mean, 'median' : np.median, 'stdev': np.std})
print insample_scores
msg("Writing yy_df back out with ols predictions inside")
yy_df.to_pickle(sys.argv[1])
| <filename>data/external/repositories/137656/blundercheck-master/combine/contest_20150213a/modeling/fit_linear_pgmodel.py
#!/usr/bin/env python
import sys, time
import numpy as np
import cPickle as pickle
from pandas import DataFrame
from pandas import read_pickle
from pandas import get_dummies
import statsmodels.formula.api as sm
from sklearn.externals import joblib
from djeval import *
msg("Hi, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
msg("Getting subset ready.")
# TODO save the dummies along with yy_df
dummies = get_dummies(yy_df['opening_feature'])
# TODO save the moveelo_features along with yy_df
moveelo_features = [("moveelo_" + x) for x in ['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
new_depth_cols = ['mean_num_bestmoves', 'mean_num_bestmove_changes', 'mean_bestmove_depths_agreeing', 'mean_deepest_change', 'mean_deepest_change_ratio']
train = yy_df[yy_df.meanerror.notnull() & yy_df.elo.notnull()]
formula_rhs = "side + nmerror + gameoutcome + drawn_game + gamelength + meanecho"
formula_rhs = formula_rhs + " + opponent_nmerror + opponent_noblunders"
formula_rhs = formula_rhs + " + min_nmerror + early_lead"
formula_rhs = formula_rhs + " + q_error_one + q_error_two"
formula_rhs = formula_rhs + " + opponent_q_error_one"
formula_rhs = formula_rhs + " + mean_depth_clipped + mean_seldepth"
formula_rhs = formula_rhs + " + mean_depths_ar + mean_deepest_ar"
formula_rhs = formula_rhs + " + opponent_mean_depths_ar + opponent_mean_deepest_ar"
formula_rhs = formula_rhs + " + pct_sanemoves"
formula_rhs = formula_rhs + " + " + " + ".join(dummies.columns.values)
formula_rhs = formula_rhs + " + moveelo_weighted"
formula_rhs = formula_rhs + " + " + " + ".join(new_depth_cols)
# Never mind these, they didn't help much
#formula_rhs = formula_rhs + " + " + " + ".join(moveelo_features)
formula = "elo ~ " + formula_rhs
msg("Fitting!")
ols = sm.ols(formula=formula, data=train).fit()
print ols.summary()
msg("Making predictions for all playergames")
yy_df['ols_prediction'] = ols.predict(yy_df)
yy_df['ols_error'] = (yy_df['ols_prediction'] - yy_df['elo']).abs()
yy_df['training'] = yy_df['elo'].notnull()
insample_scores = yy_df.groupby('training')['ols_error'].agg({'mean' : np.mean, 'median' : np.median, 'stdev': np.std})
print insample_scores
msg("Writing yy_df back out with ols predictions inside")
yy_df.to_pickle(sys.argv[1])
| en | 0.73992 | #!/usr/bin/env python # TODO save the dummies along with yy_df # TODO save the moveelo_features along with yy_df # Never mind these, they didnt help much #formula_rhs = formula_rhs + " + " + " + ".join(moveelo_features) | 1.841076 | 2 |
PyScripts/New-KeeperRecordAttachment.py | tonylanglet/keepersecurity-powershell | 2 | 6633064 | <filename>PyScripts/New-KeeperRecordAttachment.py
import sys
import getopt
import getpass
import string
import argparse
from keepercommander.record import Record
from keepercommander.commands.record import RecordUploadAttachmentCommand
from keepercommander.params import KeeperParams
from keepercommander import display, api
my_params = KeeperParams()
# MAIN FUNCTION
def main(argv):
# Authentication credentials
authUsername = None
authPassword = None
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--record', action='store', help='Folder UID', required=True)
parser.add_argument('--file', dest='file', action='append', help='file path', required=True)
parser.add_argument('-auser', '--ausername', type=str, help='Authentication username', required=True)
parser.add_argument('-apass', '--apassword', type=str, help='Authentication password', required=True)
args = parser.parse_args()
Parameters = dict()
if args.record is not None:
Parameters.update({'record':args.record})
if args.file is not None:
Parameters.update({'file':args.file})
if args.ausername:
authUsername = args.ausername
if args.apassword:
authPassword = <PASSWORD>
while not my_params.user:
my_params.user = authUsername
while not my_params.password:
my_params.password = <PASSWORD>
api.sync_down(my_params)
# KEEPER COMMAND
command = RecordUploadAttachmentCommand()
result = command.execute(my_params, **Parameters)
print(result)
return result
if __name__ == "__main__":
main(sys.argv[1:])
| <filename>PyScripts/New-KeeperRecordAttachment.py
import sys
import getopt
import getpass
import string
import argparse
from keepercommander.record import Record
from keepercommander.commands.record import RecordUploadAttachmentCommand
from keepercommander.params import KeeperParams
from keepercommander import display, api
my_params = KeeperParams()
# MAIN FUNCTION
def main(argv):
# Authentication credentials
authUsername = None
authPassword = None
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--record', action='store', help='Folder UID', required=True)
parser.add_argument('--file', dest='file', action='append', help='file path', required=True)
parser.add_argument('-auser', '--ausername', type=str, help='Authentication username', required=True)
parser.add_argument('-apass', '--apassword', type=str, help='Authentication password', required=True)
args = parser.parse_args()
Parameters = dict()
if args.record is not None:
Parameters.update({'record':args.record})
if args.file is not None:
Parameters.update({'file':args.file})
if args.ausername:
authUsername = args.ausername
if args.apassword:
authPassword = <PASSWORD>
while not my_params.user:
my_params.user = authUsername
while not my_params.password:
my_params.password = <PASSWORD>
api.sync_down(my_params)
# KEEPER COMMAND
command = RecordUploadAttachmentCommand()
result = command.execute(my_params, **Parameters)
print(result)
return result
if __name__ == "__main__":
main(sys.argv[1:])
| en | 0.575125 | # MAIN FUNCTION # Authentication credentials # Arguments # KEEPER COMMAND | 2.993375 | 3 |
blur/estimation/CNN/main.py | MaikWischow/Camera-Condition-Monitoring | 3 | 6633065 | import numpy as np
import os
import math
import glob
import cv2
from buildModel import MTFNet
from imgProcessing import preProcessTestImages
from prepareTrainingData import readTFRecordDataset
import tensorflow as tf
from tensorflow.python.keras.models import load_model, Model
from tensorflow.python import keras
from tensorflow.python.keras.callbacks import ModelCheckpoint
import sys
sys.path.append(r"../../../utils")
from objectDetectionUtils import getObjDetRoI, getPatchRoI, calculateIoU
IMG_PATCH_SIZE = 192
BASE_LR = 1e-4
TRAINING_BATCH_SIZE = 32
FRACTION_VALIDATION_DATA = 0.15
TRAINING_DATASET_BUFFER_SIZE = 1000
TRAINING_EPOCHS = 100
CNN_INPUT_SHAPE = (32,32,72)
IOU_THRESH = 0.7 # Only for function "estimateObjDetsMTF"
DEFAULT_MODEL_PATH = r"./model"
fileTypes = ['*.tiff', '*.png', "*.jpeg", "*.tif", "*.jpg", "*.gif"]
def prepareModel(modelPath = DEFAULT_MODEL_PATH):
"""
Load or create a CNN model for MTF estimation.
:param modelPath: Path to a CNN model to load.
:return: Ready-to-use CNN model.
"""
# Preparation
tf.keras.backend.set_image_data_format('channels_last')
keras.backend.clear_session()
tf.python.framework.ops.reset_default_graph()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Load or create CNN model
if os.path.isfile(os.path.join(modelPath, "saved_model.pb")):
M = tf.keras.models.load_model(modelPath)
print("Successfully loaded checkpoint.")
else:
M = MTFNet(CNN_INPUT_SHAPE)
print("No checkpoint found, created new model.")
# Compile CNN model
M.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=BASE_LR),
loss="mean_squared_error",
metrics=["mse"]
)
return M
def extractSubModelFC(mainModel, startIdx, endIdx):
"""
Auxiliary function to extract the fully-connected sub-model.
:param mainModel: The full CNN model.
:param startIdx: Index of the first layer to extract.
:param endIdx: Index of the last layer to extract.
:return: Extracted fully-connected sub-model.
"""
inputShape = mainModel.layers[startIdx].input.get_shape().as_list()[-3:]
inputs = keras.Input(shape=inputShape)
for idx in range(startIdx, endIdx + 1):
if idx == startIdx:
x = mainModel.layers[idx](inputs)
else:
x = mainModel.layers[idx](x)
M = tf.keras.models.Model(inputs, x)
return M
def defineAveragingModel(model):
"""
    Auxiliary function to add an averaging layer to the CNN model.
:param model: CNN model for MTF estimation.
:return: CNN model including averaging layer.
"""
# Extract sub-models for feature extraction and prediction
subModelFeature = Model(inputs = model.layers[0].input, outputs = model.layers[-5].output)
subModelPrediction = extractSubModelFC(model, 55, 58)
# Add averaging layer and put all layers together
inputShape = subModelFeature.layers[0].input.get_shape().as_list()[-3:]
inputs = keras.Input(shape=inputShape)
results = subModelFeature(inputs)
avg = tf.keras.backend.mean(results, axis=0)
output = subModelPrediction(avg)
averagingModel = Model(inputs = inputs, outputs = output)
return averagingModel
def saveMTFResult(MTF, dirOut, numHPatches, numWPatches, resultFileNamePrefix="", resultFileNameSuffix=".npz"):
"""
    Saves or prints an MTF estimation.
    :param MTF: MTF estimation to save.
    :param dirOut: Target directory to save the MTF estimation.
    :param numHPatches: Number of image patches in vertical direction.
    :param numWPatches: Number of image patches in horizontal direction.
    :param resultFileNamePrefix: Name prefix of the file to save (default: "")
    :param resultFileNameSuffix: Name suffix of the file to save (default: ".npz")
:return: None.
"""
# Shape results
resultsH = np.zeros((numHPatches, numWPatches, 8))
resultsV = np.zeros((numHPatches, numWPatches, 8))
for idx in range(len(MTF) // 2):
hIdx = idx % numHPatches
wIdx = idx // numHPatches
resultsH[hIdx, wIdx] = MTF[2 * idx][0][0]
resultsV[hIdx, wIdx] = MTF[2 * idx + 1][0][0]
    # Save results, if they do not exist yet
if dirOut is not None:
if not os.path.exists(dirOut):
os.makedirs(dirOut)
pathMTFH = os.path.join(dirOut, resultFileNamePrefix + "_MTF-H" + resultFileNameSuffix)
pathMTFV = os.path.join(dirOut, resultFileNamePrefix + "_MTF-V" + resultFileNameSuffix)
if not os.path.exists(pathMTFH) and not os.path.exists(pathMTFV):
np.savez_compressed(pathMTFH, resultsH)
np.savez_compressed(pathMTFV, resultsV)
else:
print("MTF-H:", resultsH)
print("MTF-V:", resultsH)
def estimateMTF(model, imgPathBatch, dirOut=None):
"""
Apply a CNN model for MTF estimation.
:param model: The (ready-to-use) CNN model.
    :param imgPathBatch: Array of image paths as input for the MTF estimation (default is a batch size of four images).
:param dirOut: Directory to save the MTF estimation results.
:return: None.
"""
# Create the CNN model
averagingModel = defineAveragingModel(model)
# Load images and extract green channel for MTF estimation (according to the original paper)
firstImageFileName = imgPathBatch[0].split(os.sep)[-1].split(".")[0]
lastImageFileName = imgPathBatch[-1].split(os.sep)[-1].split(".")[0]
imgBatch = [cv2.imread(imgPath) for imgPath in imgPathBatch]
imgBatch = [img[..., 1] for img in imgBatch if img is not None and len(img.shape) == 3]
imgBatch = np.array(imgBatch)
if imgBatch is not None and len(imgBatch) > 0:
# Pre-process images
h, w = imgBatch[0].shape[0:2]
numHPatches, numWPatches = math.ceil(h / IMG_PATCH_SIZE), math.ceil(w / IMG_PATCH_SIZE)
imgs = np.array(preProcessTestImages(imgBatch, IMG_PATCH_SIZE))
# Apply images to the CNN
MTF = averagingModel(tf.convert_to_tensor(imgs), training=False).numpy()
        # Save results
saveMTFResult(MTF, dirOut, numHPatches, numWPatches, str(firstImageFileName) + "-" + str(lastImageFileName))
def estimateObjDetsMTF(model, imgPathBatch, objDets, dirOut=None):
"""
Apply a CNN model for MTF estimation for object detection patches only.
:param model: The (ready-to-use) CNN model.
    :param imgPathBatch: Array of image paths.
:param dirOut: Directory to save the MTF estimation results.
:param objDets: Array of ground truth object detections annotations. One annotation consists of five variables:
objectClass, topLeftXCoordinate, topLeftYCoordinate, bottomRightXCoordinate, bottomRightYCoordinate
(according to the keras-YOLOv3 format).
:return: MTF Estimation results in horizontal and vertical image directions.
"""
# Create CNN model
averagingModel = defineAveragingModel(model)
# Load images and extract green channel for MTF estimation (according to the original paper)
imgBatch = [cv2.imread(imgPath) for imgPath in imgPathBatch]
imgBatch = [img[..., 1] for img in imgBatch if img is not None]
imgBatch = np.array(imgBatch)
if imgBatch is not None and len(imgBatch) > 0:
# Init cache
imgShape = imgBatch[0].shape
objDetRoICacheCoord = []
objDetRoICacheResult = []
# Iterate object detections
for objDetIdx, objDet in enumerate(objDets):
objClass, objx1, objy1, objx2, objy2 = objDet
startX, startY, endX, endY = getObjDetRoI(imgShape, IMG_PATCH_SIZE, objx1, objy1, objx2, objy2)
#Iterate corresponding image patches of detection
numPatchesX = math.ceil((endX - startX) / IMG_PATCH_SIZE)
numPatchesY = math.ceil((endY - startY) / IMG_PATCH_SIZE)
for idxX in range(numPatchesX):
for idxY in range (numPatchesY):
# Get image patch coordinates
startX_, startY_, endX_, endY_ = getPatchRoI(imgShape, IMG_PATCH_SIZE, startX, startY, idxX, idxY)
patchIdx = idxX * numPatchesY + idxY
resultFilenamePrefix = '_' + objClass + '_' + str(objDetIdx) + '_' + str(patchIdx)
                    # Search cache for a matching entry
                    cacheHit = False
for cacheIdx in range(len(objDetRoICacheCoord)):
x1, y1, x2, y2 = objDetRoICacheCoord[cacheIdx]
iou = calculateIoU(startX_, startY_, endX_, endY_, x1, y1, x2, y2)
if iou >= IOU_THRESH:
                            MTF = objDetRoICacheResult[cacheIdx]
saveMTFResult(MTF, dirOut, 1, 1, resultFilenamePrefix)
                            cacheHit = True
break
if cacheHit:
continue
existingFiles = glob.glob(os.path.join(dirOut, "*" + resultFilenamePrefix + "*"))
if len(existingFiles) == 0:
imgPatchBatch = imgBatch[:, startY_:endY_, startX_:endX_]
h, w = imgShape[0:2]
# Image pre-processing
imgs = preProcessTestImages(imgPatchBatch, IMG_PATCH_SIZE)
imgs = np.array(imgs)
# Inference on image batch
MTF = averagingModel(tf.convert_to_tensor(imgs), training=False).numpy()
# Save results
saveMTFResult(MTF, dirOut, 1, 1, resultFilenamePrefix)
objDetRoICacheCoord.append([startX_, startY_, endX_, endY_])
objDetRoICacheResult.append(MTF)
def train(model, trainDataSetPath, checkpointSaveDir):
"""
    Train the CNN model for MTF estimation.
    :param model: The untrained CNN model.
    :param trainDataSetPath: Path to the TFRecord file containing the training dataset.
    :param checkpointSaveDir: Target directory to store the checkpoint files during training.
:return: None
"""
# Load the TFRecord training dataset and split it in training and validation sub-sets.
dataset = readTFRecordDataset(trainDataSetPath)
    datasetSize = 160500  # Number of samples in the training dataset; hard-coded (not ideal style) to avoid counting the TFRecord at runtime.
trainDataset = dataset.take(int(datasetSize * (1.0 - FRACTION_VALIDATION_DATA))).cache() \
.shuffle(buffer_size=TRAINING_DATASET_BUFFER_SIZE).batch(TRAINING_BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
remaining = dataset.skip(int(datasetSize * (1.0 - FRACTION_VALIDATION_DATA)))
validDataset = remaining.take(int(datasetSize * FRACTION_VALIDATION_DATA)).cache() \
.shuffle(buffer_size=TRAINING_DATASET_BUFFER_SIZE).batch(TRAINING_BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
# Prepare model callbacks
logdir = os.path.join(checkpointSaveDir, "logs")
# Tensorboard callback
tensordBoardCallback = keras.callbacks.TensorBoard(log_dir=logdir)
# Checkpoint callback.
modelCheckpointCallback = ModelCheckpoint(
filepath=checkpointSaveDir + "/weights-improvement-{epoch:02d}",
save_weights_only=False,
monitor='val_loss',
save_best_only=False,
save_freq=1000
)
callbacks = [
tensordBoardCallback,
modelCheckpointCallback
]
# Start the model training
history = model.fit(trainDataset, epochs=TRAINING_EPOCHS, verbose=1, validation_data=validDataset, initial_epoch=0, shuffle=True, callbacks=callbacks)
print(history.history)
# Example
# if __name__ == '__main__':
# # Train the model
# if False:
# trainDataSetPath = r"../../../data/CNNTrainingData.TFRecord" # Not part of this repository.
# checkPointSaveDir = r"./model/checkpoints"
# M = prepareModel(checkPointSaveDir)
# train(M, trainDataSetPath, checkPointSaveDir)
# # Test the model
# if False:
# imgDirIn = r"../../../data/udacity/img/GT"
# dirOut = r"../../../data/udacity/labels_blur_patchwise/CNN"
# imgFileEnding = ".jpg"
# imgPathBatch = glob.glob(os.path.join(imgDirIn, "*" + imgFileEnding))
# M = prepareModel()
# estimateMTF(M, imgPathBatch, dirOut) | import numpy as np
import os
import math
import glob
import cv2
from buildModel import MTFNet
from imgProcessing import preProcessTestImages
from prepareTrainingData import readTFRecordDataset
import tensorflow as tf
from tensorflow.python.keras.models import load_model, Model
from tensorflow.python import keras
from tensorflow.python.keras.callbacks import ModelCheckpoint
import sys
sys.path.append(r"../../../utils")
from objectDetectionUtils import getObjDetRoI, getPatchRoI, calculateIoU
IMG_PATCH_SIZE = 192
BASE_LR = 1e-4
TRAINING_BATCH_SIZE = 32
FRACTION_VALIDATION_DATA = 0.15
TRAINING_DATASET_BUFFER_SIZE = 1000
TRAINING_EPOCHS = 100
CNN_INPUT_SHAPE = (32,32,72)
IOU_THRESH = 0.7 # Only for function "estimateObjDetsMTF"
DEFAULT_MODEL_PATH = r"./model"
fileTypes = ['*.tiff', '*.png', "*.jpeg", "*.tif", "*.jpg", "*.gif"]
def prepareModel(modelPath = DEFAULT_MODEL_PATH):
"""
Load or create a CNN model for MTF estimation.
:param modelPath: Path to a CNN model to load.
:return: Ready-to-use CNN model.
"""
# Preparation
tf.keras.backend.set_image_data_format('channels_last')
keras.backend.clear_session()
tf.python.framework.ops.reset_default_graph()
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Load or create CNN model
if os.path.isfile(os.path.join(modelPath, "saved_model.pb")):
M = tf.keras.models.load_model(modelPath)
print("Successfully loaded checkpoint.")
else:
M = MTFNet(CNN_INPUT_SHAPE)
print("No checkpoint found, created new model.")
# Compile CNN model
M.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=BASE_LR),
loss="mean_squared_error",
metrics=["mse"]
)
return M
def extractSubModelFC(mainModel, startIdx, endIdx):
"""
Auxiliary function to extract the fully-connected sub-model.
:param mainModel: The full CNN model.
:param startIdx: Index of the first layer to extract.
:param endIdx: Index of the last layer to extract.
:return: Extracted fully-connected sub-model.
"""
inputShape = mainModel.layers[startIdx].input.get_shape().as_list()[-3:]
inputs = keras.Input(shape=inputShape)
for idx in range(startIdx, endIdx + 1):
if idx == startIdx:
x = mainModel.layers[idx](inputs)
else:
x = mainModel.layers[idx](x)
M = tf.keras.models.Model(inputs, x)
return M
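# Illustrative sketch (not part of the original file): extractSubModelFC() turns a
# contiguous block of layers into a standalone Keras model. The indices below are
# assumptions for illustration; defineAveragingModel() below uses layers 55-58 as
# the fully-connected prediction head.
#
# fullModel = prepareModel()
# fcHead = extractSubModelFC(fullModel, 55, 58)
# fcHead.summary()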
def defineAveragingModel(model):
"""
    Auxiliary function to add an averaging layer to the CNN model.
:param model: CNN model for MTF estimation.
:return: CNN model including averaging layer.
"""
# Extract sub-models for feature extraction and prediction
subModelFeature = Model(inputs = model.layers[0].input, outputs = model.layers[-5].output)
subModelPrediction = extractSubModelFC(model, 55, 58)
# Add averaging layer and put all layers together
inputShape = subModelFeature.layers[0].input.get_shape().as_list()[-3:]
inputs = keras.Input(shape=inputShape)
results = subModelFeature(inputs)
avg = tf.keras.backend.mean(results, axis=0)
output = subModelPrediction(avg)
averagingModel = Model(inputs = inputs, outputs = output)
return averagingModel
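# Usage sketch (assumption, not from the original repo): the averaging model maps a
# whole batch of pre-processed patches to a single MTF prediction, because the patch
# features are averaged before the fully-connected head.
#
# M = prepareModel()
# avgM = defineAveragingModel(M)
# patches = np.zeros((16,) + CNN_INPUT_SHAPE, dtype=np.float32)  # dummy patch batch
# mtf = avgM(tf.convert_to_tensor(patches), training=False)      # one averaged estimate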
def saveMTFResult(MTF, dirOut, numHPatches, numWPatches, resultFileNamePrefix="", resultFileNameSuffix=".npz"):
"""
    Saves or prints an MTF estimation.
    :param MTF: MTF estimation to save.
    :param dirOut: Target directory to save the MTF estimation.
    :param numHPatches: Number of image patches in vertical direction.
    :param numWPatches: Number of image patches in horizontal direction.
    :param resultFileNamePrefix: Name prefix of the file to save (default: "").
    :param resultFileNameSuffix: Name suffix of the file to save (default: ".npz").
    :return: None.
"""
# Shape results
resultsH = np.zeros((numHPatches, numWPatches, 8))
resultsV = np.zeros((numHPatches, numWPatches, 8))
for idx in range(len(MTF) // 2):
hIdx = idx % numHPatches
wIdx = idx // numHPatches
resultsH[hIdx, wIdx] = MTF[2 * idx][0][0]
resultsV[hIdx, wIdx] = MTF[2 * idx + 1][0][0]
    # Save results, if they do not exist yet
if dirOut is not None:
if not os.path.exists(dirOut):
os.makedirs(dirOut)
pathMTFH = os.path.join(dirOut, resultFileNamePrefix + "_MTF-H" + resultFileNameSuffix)
pathMTFV = os.path.join(dirOut, resultFileNamePrefix + "_MTF-V" + resultFileNameSuffix)
if not os.path.exists(pathMTFH) and not os.path.exists(pathMTFV):
np.savez_compressed(pathMTFH, resultsH)
np.savez_compressed(pathMTFV, resultsV)
else:
print("MTF-H:", resultsH)
print("MTF-V:", resultsH)
def estimateMTF(model, imgPathBatch, dirOut=None):
"""
Apply a CNN model for MTF estimation.
:param model: The (ready-to-use) CNN model.
    :param imgPathBatch: Array of image paths as input for the MTF estimation (default is a batch size of four images).
:param dirOut: Directory to save the MTF estimation results.
:return: None.
"""
# Create the CNN model
averagingModel = defineAveragingModel(model)
# Load images and extract green channel for MTF estimation (according to the original paper)
firstImageFileName = imgPathBatch[0].split(os.sep)[-1].split(".")[0]
lastImageFileName = imgPathBatch[-1].split(os.sep)[-1].split(".")[0]
imgBatch = [cv2.imread(imgPath) for imgPath in imgPathBatch]
imgBatch = [img[..., 1] for img in imgBatch if img is not None and len(img.shape) == 3]
imgBatch = np.array(imgBatch)
if imgBatch is not None and len(imgBatch) > 0:
# Pre-process images
h, w = imgBatch[0].shape[0:2]
numHPatches, numWPatches = math.ceil(h / IMG_PATCH_SIZE), math.ceil(w / IMG_PATCH_SIZE)
imgs = np.array(preProcessTestImages(imgBatch, IMG_PATCH_SIZE))
# Apply images to the CNN
MTF = averagingModel(tf.convert_to_tensor(imgs), training=False).numpy()
        # Save results
saveMTFResult(MTF, dirOut, numHPatches, numWPatches, str(firstImageFileName) + "-" + str(lastImageFileName))
def estimateObjDetsMTF(model, imgPathBatch, objDets, dirOut=None):
"""
Apply a CNN model for MTF estimation for object detection patches only.
:param model: The (ready-to-use) CNN model.
    :param imgPathBatch: Array of image paths.
:param dirOut: Directory to save the MTF estimation results.
    :param objDets: Array of ground-truth object detection annotations. One annotation consists of five variables:
    objectClass, topLeftXCoordinate, topLeftYCoordinate, bottomRightXCoordinate, bottomRightYCoordinate
    (according to the keras-YOLOv3 format).
    :return: None. The MTF estimates for the horizontal and vertical image directions are written to dirOut.
"""
# Create CNN model
averagingModel = defineAveragingModel(model)
# Load images and extract green channel for MTF estimation (according to the original paper)
imgBatch = [cv2.imread(imgPath) for imgPath in imgPathBatch]
imgBatch = [img[..., 1] for img in imgBatch if img is not None]
imgBatch = np.array(imgBatch)
if imgBatch is not None and len(imgBatch) > 0:
# Init cache
imgShape = imgBatch[0].shape
objDetRoICacheCoord = []
objDetRoICacheResult = []
# Iterate object detections
for objDetIdx, objDet in enumerate(objDets):
objClass, objx1, objy1, objx2, objy2 = objDet
startX, startY, endX, endY = getObjDetRoI(imgShape, IMG_PATCH_SIZE, objx1, objy1, objx2, objy2)
            # Iterate over the image patches covering this detection
numPatchesX = math.ceil((endX - startX) / IMG_PATCH_SIZE)
numPatchesY = math.ceil((endY - startY) / IMG_PATCH_SIZE)
for idxX in range(numPatchesX):
for idxY in range (numPatchesY):
# Get image patch coordinates
startX_, startY_, endX_, endY_ = getPatchRoI(imgShape, IMG_PATCH_SIZE, startX, startY, idxX, idxY)
patchIdx = idxX * numPatchesY + idxY
resultFilenamePrefix = '_' + objClass + '_' + str(objDetIdx) + '_' + str(patchIdx)
# Search cache for fitting entry
cacheHit = False;
for cacheIdx in range(len(objDetRoICacheCoord)):
x1, y1, x2, y2 = objDetRoICacheCoord[cacheIdx]
iou = calculateIoU(startX_, startY_, endX_, endY_, x1, y1, x2, y2)
if iou >= IOU_THRESH:
MTF = objDetRoICacheResult[cacheIdx];
saveMTFResult(MTF, dirOut, 1, 1, resultFilenamePrefix)
cacheHit = True;
break
if cacheHit:
continue
existingFiles = glob.glob(os.path.join(dirOut, "*" + resultFilenamePrefix + "*"))
if len(existingFiles) == 0:
imgPatchBatch = imgBatch[:, startY_:endY_, startX_:endX_]
h, w = imgShape[0:2]
# Image pre-processing
imgs = preProcessTestImages(imgPatchBatch, IMG_PATCH_SIZE)
imgs = np.array(imgs)
# Inference on image batch
MTF = averagingModel(tf.convert_to_tensor(imgs), training=False).numpy()
# Save results
saveMTFResult(MTF, dirOut, 1, 1, resultFilenamePrefix)
objDetRoICacheCoord.append([startX_, startY_, endX_, endY_])
objDetRoICacheResult.append(MTF)
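# Usage sketch (hypothetical paths and boxes, mirroring the commented example at the
# bottom of this file): objDets follows the keras-YOLOv3 annotation format described
# in the docstring above.
#
# M = prepareModel()
# boxes = [("car", 100, 150, 260, 300), ("pedestrian", 400, 120, 450, 260)]
# imgs = glob.glob(os.path.join(r"../../../data/udacity/img/GT", "*.jpg"))[:4]
# estimateObjDetsMTF(M, imgs, boxes, dirOut=r"./mtf_labels")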
def train(model, trainDataSetPath, checkpointSaveDir):
"""
    Train the CNN model for MTF estimation.
    :param model: The untrained CNN model.
    :param trainDataSetPath: Path to the TFRecord file containing the training dataset.
    :param checkpointSaveDir: Target directory to store the checkpoint files during training.
:return: None
"""
# Load the TFRecord training dataset and split it in training and validation sub-sets.
dataset = readTFRecordDataset(trainDataSetPath)
    datasetSize = 160500  # Number of samples in the training dataset; hard-coded (not pretty) to avoid an extra pass over the TFRecord file.
trainDataset = dataset.take(int(datasetSize * (1.0 - FRACTION_VALIDATION_DATA))).cache() \
.shuffle(buffer_size=TRAINING_DATASET_BUFFER_SIZE).batch(TRAINING_BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
remaining = dataset.skip(int(datasetSize * (1.0 - FRACTION_VALIDATION_DATA)))
validDataset = remaining.take(int(datasetSize * FRACTION_VALIDATION_DATA)).cache() \
.shuffle(buffer_size=TRAINING_DATASET_BUFFER_SIZE).batch(TRAINING_BATCH_SIZE).prefetch(tf.data.experimental.AUTOTUNE)
# Prepare model callbacks
logdir = os.path.join(checkpointSaveDir, "logs")
# Tensorboard callback
tensordBoardCallback = keras.callbacks.TensorBoard(log_dir=logdir)
# Checkpoint callback.
modelCheckpointCallback = ModelCheckpoint(
filepath=checkpointSaveDir + "/weights-improvement-{epoch:02d}",
save_weights_only=False,
monitor='val_loss',
save_best_only=False,
save_freq=1000
)
callbacks = [
tensordBoardCallback,
modelCheckpointCallback
]
# Start the model training
history = model.fit(trainDataset, epochs=TRAINING_EPOCHS, verbose=1, validation_data=validDataset, initial_epoch=0, shuffle=True, callbacks=callbacks)
print(history.history)
# Example
# if __name__ == '__main__':
# # Train the model
# if False:
# trainDataSetPath = r"../../../data/CNNTrainingData.TFRecord" # Not part of this repository.
# checkPointSaveDir = r"./model/checkpoints"
# M = prepareModel(checkPointSaveDir)
# train(M, trainDataSetPath, checkPointSaveDir)
# # Test the model
# if False:
# imgDirIn = r"../../../data/udacity/img/GT"
# dirOut = r"../../../data/udacity/labels_blur_patchwise/CNN"
# imgFileEnding = ".jpg"
# imgPathBatch = glob.glob(os.path.join(imgDirIn, "*" + imgFileEnding))
# M = prepareModel()
# estimateMTF(M, imgPathBatch, dirOut) | en | 0.697951 | # Only for function "estimateObjDetsMTF" Load or create a CNN model for MTF estimation. :param modelPath: Path to a CNN model to load. :return: Ready-to-use CNN model. # Preparation # Load or create CNN model # Compile CNN model Auxiliary function to extract the fully-connected sub-model. :param mainModel: The full CNN model. :param startIdx: Index of the first layer to extract. :param endIdx: Index of the last layer to extract. :return: Extracted fully-connected sub-model. Auxiliary function add an averaging layer to the CNN model. :param model: CNN model for MTF estimation. :return: CNN model including averaging layer. # Extract sub-models for feature extraction and prediction # Add averaging layer and put all layers together Saves or prints a MTF estimation. :param MTF: MTF estimation to save. :param dirOut: Target directory to save the MTF esitmation. :param numHPatches: Number of image patches in vertical direction. :param numWPatches: Number of image patches in horizontal direction. :param resultFilenamePrefix: Name prefix of the file to save (default: "") :param resultFilenameSuffix: Name suffix of the file to save (default: ".npz") :return: None. # Shape results # Save results, if they not exist yet Apply a CNN model for MTF estimation. :param model: The (ready-to-use) CNN model. :param imgPathBatch: Array of images paths as input for the MTF estimation (default is a batch size of four images). :param dirOut: Directory to save the MTF estimation results. :return: None. # Create the CNN model # Load images and extract green channel for MTF estimation (according to the original paper) # Pre-process images # Apply images to the CNN #Save and return results Apply a CNN model for MTF estimation for object detection patches only. :param model: The (ready-to-use) CNN model. :param imgPathBatch: Array of images paths. :param dirOut: Directory to save the MTF estimation results. :param objDets: Array of ground truth object detections annotations. One annotation consists of five variables: objectClass, topLeftXCoordinate, topLeftYCoordinate, bottomRightXCoordinate, bottomRightYCoordinate (according to the keras-YOLOv3 format). :return: MTF Estimation results in horizontal and vertical image directions. # Create CNN model # Load images and extract green channel for MTF estimation (according to the original paper) # Init cache # Iterate object detections #Iterate corresponding image patches of detection # Get image patch coordinates # Search cache for fitting entry # Image pre-processing # Inference on image batch # Save results Train the CNN model for MTF esitmation. :param model: The untrained CNN model. :param trainDataSetPath: Path to the TFRecord file containt the training dataset. :param checkPointSaveDir: Target directory to store the checkpoint files during training. :return: None # Load the TFRecord training dataset and split it in training and validation sub-sets. # Number of samples in the training dataset. It is not a good style but it saves runtime, though. # Prepare model callbacks # Tensorboard callback # Checkpoint callback. # Start the model training # Example # if __name__ == '__main__': # # Train the model # if False: # trainDataSetPath = r"../../../data/CNNTrainingData.TFRecord" # Not part of this repository. 
# checkPointSaveDir = r"./model/checkpoints" # M = prepareModel(checkPointSaveDir) # train(M, trainDataSetPath, checkPointSaveDir) # # Test the model # if False: # imgDirIn = r"../../../data/udacity/img/GT" # dirOut = r"../../../data/udacity/labels_blur_patchwise/CNN" # imgFileEnding = ".jpg" # imgPathBatch = glob.glob(os.path.join(imgDirIn, "*" + imgFileEnding)) # M = prepareModel() # estimateMTF(M, imgPathBatch, dirOut) | 2.425426 | 2 |
archivebox/parsers/shaarli_rss.py | TrAyZeN/ArchiveBox | 1 | 6633066 | <filename>archivebox/parsers/shaarli_rss.py
__package__ = 'archivebox.parsers'
from typing import IO, Iterable
from datetime import datetime
from ..index.schema import Link
from ..util import (
htmldecode,
enforce_types,
str_between,
)
@enforce_types
def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse Shaarli-specific RSS XML-format files into links"""
rss_file.seek(0)
entries = rss_file.read().split('<entry>')[1:]
for entry in entries:
# example entry:
# <entry>
# <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>
# <link href="https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html" />
# <id>https://demo.shaarli.org/?cEV4vw</id>
# <published>2019-01-30T06:06:01+00:00</published>
# <updated>2019-01-30T06:06:01+00:00</updated>
# <content type="html" xml:lang="en"><![CDATA[<div class="markdown"><p>— <a href="https://demo.shaarli.org/?cEV4vw">Permalink</a></p></div>]]></content>
# </entry>
trailing_removed = entry.split('</entry>', 1)[0]
leading_removed = trailing_removed.strip()
rows = leading_removed.split('\n')
def get_row(key):
return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]
title = str_between(get_row('title'), '<title>', '</title>').strip()
url = str_between(get_row('link'), '<link href="', '" />')
ts_str = str_between(get_row('published'), '<published>', '</published>')
time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
yield Link(
url=htmldecode(url),
timestamp=str(time.timestamp()),
title=htmldecode(title) or None,
tags=None,
sources=[rss_file.name],
)
| <filename>archivebox/parsers/shaarli_rss.py
__package__ = 'archivebox.parsers'
from typing import IO, Iterable
from datetime import datetime
from ..index.schema import Link
from ..util import (
htmldecode,
enforce_types,
str_between,
)
@enforce_types
def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse Shaarli-specific RSS XML-format files into links"""
rss_file.seek(0)
entries = rss_file.read().split('<entry>')[1:]
for entry in entries:
# example entry:
# <entry>
# <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>
# <link href="https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html" />
# <id>https://demo.shaarli.org/?cEV4vw</id>
# <published>2019-01-30T06:06:01+00:00</published>
# <updated>2019-01-30T06:06:01+00:00</updated>
# <content type="html" xml:lang="en"><![CDATA[<div class="markdown"><p>— <a href="https://demo.shaarli.org/?cEV4vw">Permalink</a></p></div>]]></content>
# </entry>
trailing_removed = entry.split('</entry>', 1)[0]
leading_removed = trailing_removed.strip()
rows = leading_removed.split('\n')
def get_row(key):
return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]
title = str_between(get_row('title'), '<title>', '</title>').strip()
url = str_between(get_row('link'), '<link href="', '" />')
ts_str = str_between(get_row('published'), '<published>', '</published>')
time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
yield Link(
url=htmldecode(url),
timestamp=str(time.timestamp()),
title=htmldecode(title) or None,
tags=None,
sources=[rss_file.name],
)
| de | 0.29027 | Parse Shaarli-specific RSS XML-format files into links # example entry: # <entry> # <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title> # <link href="https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html" /> # <id>https://demo.shaarli.org/?cEV4vw</id> # <published>2019-01-30T06:06:01+00:00</published> # <updated>2019-01-30T06:06:01+00:00</updated> # <content type="html" xml:lang="en"><![CDATA[<div class="markdown"><p>— <a href="https://demo.shaarli.org/?cEV4vw">Permalink</a></p></div>]]></content> # </entry> | 2.635908 | 3 |
grafana_backup/create_datasource.py | Keimille/grafana-backup-tool | 515 | 6633067 | import json
from grafana_backup.dashboardApi import create_datasource
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
datasource = json.loads(data)
result = create_datasource(json.dumps(datasource), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
print("create datasource: {0}, status: {1}, msg: {2}".format(datasource['name'], result[0], result[1]))
| import json
from grafana_backup.dashboardApi import create_datasource
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
datasource = json.loads(data)
result = create_datasource(json.dumps(datasource), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
print("create datasource: {0}, status: {1}, msg: {2}".format(datasource['name'], result[0], result[1]))
| none | 1 | 2.593093 | 3 |
|
pciSeq/src/preprocess/cell_merger.py | acycliq/pciSeq | 10 | 6633068 | <gh_stars>1-10
import os
import shutil
import logging
import itertools
import numpy as np
from collections import defaultdict
from pciSeq.src.preprocess.post import Post_merge
from pciSeq.src.preprocess.utils import _to_csr_matrix, _get_connected_labels
from scipy.sparse.csgraph import connected_components
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s"
)
logger = logging.getLogger()
class Merge_register(object):
def __init__(self, parent):
self.entries = defaultdict(list)
self.parent = parent
def update_register(self, tile_id, label, old_label):
self.entries[label].append(tile_id)
self.entries[label] = sorted(list(set(self.entries[label])))
logger.info('tile_%d: label %d ---> label %d' % (tile_id, old_label, label))
if (old_label != label) and (old_label < 0):
self.replace_label(old_label, label)
def replace_label(self, old_label, label):
# replace the register
_dict = self.entries
for tile_id in _dict[old_label]:
if tile_id not in _dict[label]:
_dict[label].append(tile_id)
mask = self.parent.tiles[tile_id]['label_image'].data == old_label
self.parent.tiles[tile_id]['label_image'].data[mask] = label
logger.info('tile: %d: replaced labels in "label_image" that were equal to %d with %d' % (tile_id, old_label, label))
logger.info('Dropped key, value pair: (%d, %s) from the merge_register' % (old_label, _dict[old_label]))
_dict.pop(old_label)
# ----------------------------------------------------------------------------------------------------------------------
class Stage(object):
def __init__(self, tile_obj, spots_all):
self.counter = itertools.count()
self.merge_register = Merge_register(self)
self.cell_props = None
self.spots = None
self.tiles = tile_obj.tiles
self.tiles_across = tile_obj.tiles_across
self.tiles_down = tile_obj.tiles_down
self.tile_shape = tile_obj.tile_shape
self.scaling_factor = 1 # ti
self.compose_dict(spots_all)
def compose_dict(self, spots_all):
"""
Mutates in-place the tile object by adding two more key/value pairs
Parameters
----------
spots_all: dataframe
Contains all the spots (Gene names and x,y coords) for the full image
"""
for i, d in enumerate(self.tiles):
# d['label_image'] = self.tile_label_image(i) # label_image for the i-th tile
d['spots'] = self.tile_spots(spots_all, i) # spots for the i-th tile
def tile_spots(self, data, i):
""" spots for the i-th tile """
x_range = self.tiles[i]['tile_range']['x']
y_range = self.tiles[i]['tile_range']['y']
mask = (data.x.values >= x_range[0]) & \
(data.x.values < x_range[1]) & \
(data.y.values >= y_range[0]) & \
(data.y.values < y_range[1])
df = data[mask].dropna()
df = df[['Gene', 'x', 'y']]
df = df[~df.duplicated()]
gene_name, idx = np.unique(df.Gene.values, return_inverse=True)
df['gene_id'] = idx # this is effectively the gene id
df['x'] = df.x * self.scaling_factor
df['y'] = df.y * self.scaling_factor
df = df.sort_values(['x', 'y'], ascending=[True, True]) \
.reset_index(drop=True) # <-- DO NOT FORGET TO RESET THE INDEX
return df
def post_merge(self, argin):
pm = Post_merge(argin[0], argin[1], argin[2])
pm.run()
def label_generator(self):
return -1 * (next(self.counter) + 1)
def merge_cells(self):
""" Merge cells clipped by two or more tiles. """
for tile in self.tiles:
logger.info('\n')
logger.info('Doing tile %i' % tile['tile_id'])
self.merge(tile)
logger.info('Relabelling finished')
logger.info('\n')
def merge(self, tile):
"""
Does most of the heavy lifting for cell merging. Mutates in-place the label_image arrays of three tiles.
If tile has tile_id = i then the mutated label_images are for the tiles with:
tile_id = i
            tile_id = i - 1 (the neighbouring tile at the left)
tile_id = i - #tiles_across (the neighbouring tile at the top)
Parameters
----------
tile: an instance of the class Fov
Notes
-----
        It is assumed that each tile is big enough (relative to the cells) so that there is no cell bigger in size than a tile.
        For example, the most complicated case will be a cell clipped by four tiles forming a 2x2 setup with the cell centroid close
        to the intersection of the four tiles.
"""
tile_id = tile['tile_id']
adj_img = self.adjacent_tile(tile_id)
logger.info('tile_%d neighbours: (above, left): (%s, %s)' % (tile_id, adj_img['up'], adj_img['left']))
# Bottom border of the label array above
if (adj_img['up'] is not None) and np.any(self.tiles[adj_img['up']]['label_image'].data):
tile_up = self.tiles[adj_img['up']]
coo_aa, coo_bb = self.dissolve_borders(tile_up, tile, transpose=True)
tile_up['label_image'] = coo_aa
tile['label_image'] = coo_bb
if adj_img['left'] is not None:
tile_left = self.tiles[adj_img['left']]
coo_a, coo_b = self.dissolve_borders(tile_left, tile)
tile_left['label_image'] = coo_a
tile['label_image'] = coo_b
def adjacent_tile(self, tile_id):
if tile_id % self.tiles_across != 0:
left = tile_id - 1
else:
left = None
if tile_id >= self.tiles_across:
up = tile_id - self.tiles_across
else:
up = None
return {'left': left, 'up': up}
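    # Worked example (illustration only): with tiles_across = 3 the tile grid is
    #   0 1 2
    #   3 4 5
    # so adjacent_tile(4) -> {'left': 3, 'up': 1}, adjacent_tile(3) -> {'left': None, 'up': 0}
    # and adjacent_tile(0) -> {'left': None, 'up': None}.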
def dissolve_borders(self, adjc_tile, tile, transpose=False):
"""
        Compares the label_image arrays from two neighbouring (one next to the other) tiles. If the last column of the
label_image at the left and the first column of the one at the right have non-zero values at the same location
then the labels at these locations are assigned a new and common label
Parameters
----------
adjc_tile: an instance of the class Fov
The neighbouring tile. Could be the neighbour from the right, or from above
tile: an instance of the class Fov
the current tile
transpose: bool. Optional
if adjc_tile is the neighbour from the top, then set this to True. Default is False
Returns
-------
temp_a, temp_b: tuple
A tuple of two label_image arrays that correspond to the adjacent and the current tile respectively
"""
if transpose:
adjc_img = adjc_tile['label_image'].transpose()
img = tile['label_image'].transpose()
else:
adjc_img = adjc_tile['label_image']
img = tile['label_image']
arr = adjc_img.toarray()
adjc_border = arr[:, -1]
border = img.toarray()[:, 0]
logger.info('length of adjc_border: %d' % adjc_border.shape[0])
        logger.info('length of border: %d' % border.shape[0])
matched_labels = self.connect_labels(adjc_border, border)
temp_a = self.tiles[adjc_tile['tile_id']]['label_image'].copy()
temp_b = self.tiles[tile['tile_id']]['label_image'].copy()
for d in matched_labels:
new_label = self._new_label(d)
for x in d['a']:
temp_a.data[temp_a.data == x] = new_label
self.merge_register.update_register(adjc_tile['tile_id'], new_label, x)
# logger.info('tile_%d: label %d ---> label %d' % (adjc_tile['tile_id'], x, new_label))
for x in d['b']:
temp_b.data[temp_b.data == x] = new_label
self.merge_register.update_register(tile['tile_id'], new_label, x)
# logger.info('tile_%d: label %d ---> label %d' % (tile['tile_id'], x, new_label))
return temp_a, temp_b
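    # Sketch of the border comparison above (illustration only): for a left/right pair
    # the relevant pixels are the last column of the left label_image and the first
    # column of the right one, e.g.
    #   left tile, last column  : [0, 7, 7, 0, 3]
    #   right tile, first column: [0, 9, 9, 9, 0]
    # connect_labels() then reports {'a': [7], 'b': [9]} and both labels are replaced
    # by one freshly generated (negative) label in the two label_images.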
def _new_label(self, d):
# get a list from the dict values
_list = [x[0] for x in list(d.values())]
# Find the biggest non-positive value
m = sorted([el for el in _list if el < 0])
# m = list(set(m))
if len(m) > 0:
# label has already been reassigned. Give that to all merging cells and do not generate a new label.
out = m[-1]
# I think m should contain the same elements anyway. If len(set(m)) > 1 then something went wrong??
logger.info('m is: %s' % m)
else:
out = self.label_generator()
assert out < 0, 'Generated labels should be negative'
return out
def connect_labels(self, par_a, par_b):
'''
compares two list-like input objects of the same size and returns the elements in ''par_a''
and ''par_b'' which are non-zero and have the same index position in both inputs
Example connect_labels([0,0,0,2,2,2,4,7], [0,2,2,2,2,2,2,9]) returns
[
{'a': [2, 4], 'b': [2]},
{'a': [7], 'b': [9]}
]
which means that from the first arg the values 2 and 4 meet (have the same position in the array)
with the value of 2 from the second arg.
Also value 7 from the first arg has the same position with value 9 in the second arg. They are the
last elements in both lists
        :param par_a: list
        :param par_b: list
:return:
'''
assert len(par_a) == len(par_b), "inputs to the function should have the same length"
a, b, lookup_label_a, lookup_label_b = self._shift_labels(par_a, par_b)
assert len(a) == len(b), "a and b do not have the same length"
assert len(a) == len(par_a)
assert len(b) == len(par_b)
        # Make sure the two lists do not have common elements
a_b = [d for d in a if d in b and d > 0] # intersection of a and b
assert not a_b, 'The two inputs should not have common elements'
connected_dict = []
# Find now which labels should be merged
# mapped will be a list of 2d tuples. For example:
# If mapped = [(7,2), (7,5), (8,1)]
# it means that:
# label 7 and 2 should be considered the same
# label 7 and 5 should be considered the same, hence 2 and 5 are also the same
# label 8 and 1 should be considered the same
t = set([d for d in list(zip(a, b)) if 0 not in d])
mapped = list(zip(*t))
if mapped:
nlabels = np.array([a, b]).max()
mat = _to_csr_matrix(mapped[0], mapped[1], nlabels + 1)
n_components, labels = connected_components(csgraph=mat, directed=False, return_labels=True)
connected_labels = _get_connected_labels(labels)
_aa = []
_bb = []
for _list in connected_labels:
_a = [lookup_label_a[d] for d in _list if d in a]
_b = [lookup_label_b[d] for d in _list if d in b]
connected_dict.append({'a': _a, 'b': _b})
else:
connected_labels = []
connected_dict = []
# print(connected_labels)
return connected_dict
def _shift_labels(self, a, b):
# New labels must be negative
#
# Shifts the non-zero elements of a and b so that both lists a and b have values >= 0
# then shifts a only if a and b have common elements so that they do not intersect
# example: _shift_labels([2,1], [20,10]) gives:
# (array([2, 1]), array([20, 10]), {2: 2, 1: 1}, {20: 20, 10: 10})
# nothing really changes since the input data are well formed, hence no shift of any kind has to be taken
# For _shift_labels([-2,-1], [2,1]) then the output is
# (array([1, 2]), array([5, 4]), {1: -2, 2: -1}, {5: 2, 4: 1})
        # because [-2, -1] has to be shifted by 3 to become positive: [1, 2]. The same shift is also applied
# to the second list, [2, 1], which becomes [5, 4]
#
a = np.array(a)
b = np.array(b)
_a = a.copy()
_b = b.copy()
mina = min(a)
minb = min(b)
if mina < 0 or minb < 0:
k1 = abs(min(mina, minb)) + 1
else:
k1 = 0
a[a != 0] = a[a != 0] + k1
b[b != 0] = b[b != 0] + k1
# take the intersection
if np.any(np.in1d(a, b)):
a_b = a[np.in1d(a, b)]
a_b = a_b[a_b > 0]
else:
a_b = []
if np.any(a_b) & (np.any(a) & np.any(b)):
k2 = max([max(a), max(b)])
else:
k2 = 0
a[a > 0] = a[a > 0] + k2
# make a map to link the shifted labels with the original ones
assert len(a) == len(_a)
assert len(b) == len(_b)
rmap_a = {a[i]: _a[i] for i, d in enumerate(a)}
rmap_b = {b[i]: _b[i] for i, d in enumerate(b)}
return a, b, rmap_a, rmap_b
def collate_arrays(self, d):
arr = self.tile_topo(d)
stacked_rows = []
for row in arr:
row_temp = []
for id in row:
if np.isnan(id):
arr = np.zeros(self.tile_shape).astype(np.int32)
else:
id = id.astype(np.int32)
arr = self.tiles[id]['label_image'].toarray().astype(np.int32)
row_temp.append(arr)
stacked_rows.append(np.hstack(row_temp))
if len(stacked_rows) > 0:
rows = self._padded(stacked_rows) # <---- I THINK THIS IS NOT NEEDED ANYMORE
return np.vstack(rows)
else:
return np.array([])
def tile_topo(self, d):
a = np.arange(self.tiles_down * self.tiles_across).reshape((self.tiles_down, self.tiles_across))
mask = np.isin(a, d)
return np.where(mask, a, np.nan)[mask.any(axis=1)][:, mask.any(axis=0)]
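    # Worked example (illustration only): for tiles_down = tiles_across = 3 the full
    # topology is [[0, 1, 2], [3, 4, 5], [6, 7, 8]], so tile_topo([1, 2, 4, 5]) keeps
    # only the rows/columns containing the requested ids and returns
    #   [[1., 2.],
    #    [4., 5.]]
    # with np.nan wherever a position belongs to a tile that was not requested.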
def _padded(self, data):
dims = max([d.shape for d in data])
out = []
for d in data:
if d.shape != dims:
p = np.zeros(dims)
p[:d.shape[0], :d.shape[1]] = d
out.append(p)
else:
out.append(d)
return out
def find_offset(self, tile_ids):
sanity_check = np.array([self.tiles[d]['tile_id'] == d for d in tile_ids])
assert np.all(sanity_check)
offset_x = min([self.tiles[d]['tile_offset_x'] for d in tile_ids])
offset_y = min([self.tiles[d]['tile_offset_y'] for d in tile_ids])
return offset_x, offset_y
def assign_cell_id(self):
""" Add an extra column to be used as cell id
This should be made redundant. The label can be used instead.
"""
cell_id = self.cell_props.label - 1
cell_id[cell_id < 0] = np.nan
return cell_id
def writer(self, dirpath):
'''
save the data to the flatfile
:return:
'''
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
os.mkdir(dirpath)
# 1. save the cell props
cell_props = self.cell_props.copy()
cell_props['x'] = cell_props.x.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['y'] = cell_props.y.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['cell_id'] = cell_props.cell_id.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['label'] = cell_props.label.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cells_headers = ['cell_id', 'label', 'tile_id', 'area', 'x', 'y']
cell_props[cells_headers].to_csv(os.path.join(dirpath, '_cells.csv'), index=False)
# 2. save the cell coords
coords_headers = ['cell_id', 'label', 'coords']
cell_props[coords_headers].to_json(os.path.join(dirpath, '_cellCoords.json'), orient='records')
# 3. save the spots
spots_df = self.spots.copy()
spots_df['target'] = spots_df.Gene
spots_df['x_global'] = spots_df.x
spots_df['y_global'] = spots_df.y
spots_df['tile_id'] = spots_df.tile_id
spots_df['x_cell'] = spots_df.x_cell.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
spots_df['y_cell'] = spots_df.y_cell.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
spots_headers = ['x_global', 'y_global', 'tile_id', 'label', 'target', 'x_cell', 'y_cell']
spots_df[spots_headers].to_csv(os.path.join(dirpath, '_spots.csv'), index=False)
logger.info('Total number of collected spots: %d' % spots_df.shape[0])
return cell_props[cells_headers], cell_props[coords_headers], spots_df[spots_headers]
| import os
import shutil
import logging
import itertools
import numpy as np
from collections import defaultdict
from pciSeq.src.preprocess.post import Post_merge
from pciSeq.src.preprocess.utils import _to_csr_matrix, _get_connected_labels
from scipy.sparse.csgraph import connected_components
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s:%(levelname)s:%(message)s"
)
logger = logging.getLogger()
class Merge_register(object):
def __init__(self, parent):
self.entries = defaultdict(list)
self.parent = parent
def update_register(self, tile_id, label, old_label):
self.entries[label].append(tile_id)
self.entries[label] = sorted(list(set(self.entries[label])))
logger.info('tile_%d: label %d ---> label %d' % (tile_id, old_label, label))
if (old_label != label) and (old_label < 0):
self.replace_label(old_label, label)
def replace_label(self, old_label, label):
# replace the register
_dict = self.entries
for tile_id in _dict[old_label]:
if tile_id not in _dict[label]:
_dict[label].append(tile_id)
mask = self.parent.tiles[tile_id]['label_image'].data == old_label
self.parent.tiles[tile_id]['label_image'].data[mask] = label
logger.info('tile: %d: replaced labels in "label_image" that were equal to %d with %d' % (tile_id, old_label, label))
logger.info('Dropped key, value pair: (%d, %s) from the merge_register' % (old_label, _dict[old_label]))
_dict.pop(old_label)
# ----------------------------------------------------------------------------------------------------------------------
class Stage(object):
def __init__(self, tile_obj, spots_all):
self.counter = itertools.count()
self.merge_register = Merge_register(self)
self.cell_props = None
self.spots = None
self.tiles = tile_obj.tiles
self.tiles_across = tile_obj.tiles_across
self.tiles_down = tile_obj.tiles_down
self.tile_shape = tile_obj.tile_shape
self.scaling_factor = 1 # ti
self.compose_dict(spots_all)
def compose_dict(self, spots_all):
"""
Mutates in-place the tile object by adding two more key/value pairs
Parameters
----------
spots_all: dataframe
Contains all the spots (Gene names and x,y coords) for the full image
"""
for i, d in enumerate(self.tiles):
# d['label_image'] = self.tile_label_image(i) # label_image for the i-th tile
d['spots'] = self.tile_spots(spots_all, i) # spots for the i-th tile
def tile_spots(self, data, i):
""" spots for the i-th tile """
x_range = self.tiles[i]['tile_range']['x']
y_range = self.tiles[i]['tile_range']['y']
mask = (data.x.values >= x_range[0]) & \
(data.x.values < x_range[1]) & \
(data.y.values >= y_range[0]) & \
(data.y.values < y_range[1])
df = data[mask].dropna()
df = df[['Gene', 'x', 'y']]
df = df[~df.duplicated()]
gene_name, idx = np.unique(df.Gene.values, return_inverse=True)
df['gene_id'] = idx # this is effectively the gene id
df['x'] = df.x * self.scaling_factor
df['y'] = df.y * self.scaling_factor
df = df.sort_values(['x', 'y'], ascending=[True, True]) \
.reset_index(drop=True) # <-- DO NOT FORGET TO RESET THE INDEX
return df
def post_merge(self, argin):
pm = Post_merge(argin[0], argin[1], argin[2])
pm.run()
def label_generator(self):
return -1 * (next(self.counter) + 1)
def merge_cells(self):
""" Merge cells clipped by two or more tiles. """
for tile in self.tiles:
logger.info('\n')
logger.info('Doing tile %i' % tile['tile_id'])
self.merge(tile)
logger.info('Relabelling finished')
logger.info('\n')
def merge(self, tile):
"""
Does most of the heavy lifting for cell merging. Mutates in-place the label_image arrays of three tiles.
If tile has tile_id = i then the mutated label_images are for the tiles with:
tile_id = i
            tile_id = i - 1 (the neighbouring tile at the left)
tile_id = i - #tiles_across (the neighbouring tile at the top)
Parameters
----------
tile: an instance of the class Fov
Notes
-----
        It is assumed that each tile is big enough (relative to the cells) so that there is no cell bigger in size than a tile.
        For example, the most complicated case will be a cell clipped by four tiles forming a 2x2 setup with the cell centroid close
        to the intersection of the four tiles.
"""
tile_id = tile['tile_id']
adj_img = self.adjacent_tile(tile_id)
logger.info('tile_%d neighbours: (above, left): (%s, %s)' % (tile_id, adj_img['up'], adj_img['left']))
# Bottom border of the label array above
if (adj_img['up'] is not None) and np.any(self.tiles[adj_img['up']]['label_image'].data):
tile_up = self.tiles[adj_img['up']]
coo_aa, coo_bb = self.dissolve_borders(tile_up, tile, transpose=True)
tile_up['label_image'] = coo_aa
tile['label_image'] = coo_bb
if adj_img['left'] is not None:
tile_left = self.tiles[adj_img['left']]
coo_a, coo_b = self.dissolve_borders(tile_left, tile)
tile_left['label_image'] = coo_a
tile['label_image'] = coo_b
def adjacent_tile(self, tile_id):
if tile_id % self.tiles_across != 0:
left = tile_id - 1
else:
left = None
if tile_id >= self.tiles_across:
up = tile_id - self.tiles_across
else:
up = None
return {'left': left, 'up': up}
def dissolve_borders(self, adjc_tile, tile, transpose=False):
"""
        Compares the label_image arrays from two neighbouring (one next to the other) tiles. If the last column of the
label_image at the left and the first column of the one at the right have non-zero values at the same location
then the labels at these locations are assigned a new and common label
Parameters
----------
adjc_tile: an instance of the class Fov
The neighbouring tile. Could be the neighbour from the right, or from above
tile: an instance of the class Fov
the current tile
transpose: bool. Optional
if adjc_tile is the neighbour from the top, then set this to True. Default is False
Returns
-------
temp_a, temp_b: tuple
A tuple of two label_image arrays that correspond to the adjacent and the current tile respectively
"""
if transpose:
adjc_img = adjc_tile['label_image'].transpose()
img = tile['label_image'].transpose()
else:
adjc_img = adjc_tile['label_image']
img = tile['label_image']
arr = adjc_img.toarray()
adjc_border = arr[:, -1]
border = img.toarray()[:, 0]
logger.info('length of adjc_border: %d' % adjc_border.shape[0])
        logger.info('length of border: %d' % border.shape[0])
matched_labels = self.connect_labels(adjc_border, border)
temp_a = self.tiles[adjc_tile['tile_id']]['label_image'].copy()
temp_b = self.tiles[tile['tile_id']]['label_image'].copy()
for d in matched_labels:
new_label = self._new_label(d)
for x in d['a']:
temp_a.data[temp_a.data == x] = new_label
self.merge_register.update_register(adjc_tile['tile_id'], new_label, x)
# logger.info('tile_%d: label %d ---> label %d' % (adjc_tile['tile_id'], x, new_label))
for x in d['b']:
temp_b.data[temp_b.data == x] = new_label
self.merge_register.update_register(tile['tile_id'], new_label, x)
# logger.info('tile_%d: label %d ---> label %d' % (tile['tile_id'], x, new_label))
return temp_a, temp_b
def _new_label(self, d):
# get a list from the dict values
_list = [x[0] for x in list(d.values())]
# Find the biggest non-positive value
m = sorted([el for el in _list if el < 0])
# m = list(set(m))
if len(m) > 0:
# label has already been reassigned. Give that to all merging cells and do not generate a new label.
out = m[-1]
# I think m should contain the same elements anyway. If len(set(m)) > 1 then something went wrong??
logger.info('m is: %s' % m)
else:
out = self.label_generator()
assert out < 0, 'Generated labels should be negative'
return out
def connect_labels(self, par_a, par_b):
'''
compares two list-like input objects of the same size and returns the elements in ''par_a''
and ''par_b'' which are non-zero and have the same index position in both inputs
Example connect_labels([0,0,0,2,2,2,4,7], [0,2,2,2,2,2,2,9]) returns
[
{'a': [2, 4], 'b': [2]},
{'a': [7], 'b': [9]}
]
which means that from the first arg the values 2 and 4 meet (have the same position in the array)
with the value of 2 from the second arg.
Also value 7 from the first arg has the same position with value 9 in the second arg. They are the
last elements in both lists
        :param par_a: list
        :param par_b: list
:return:
'''
assert len(par_a) == len(par_b), "inputs to the function should have the same length"
a, b, lookup_label_a, lookup_label_b = self._shift_labels(par_a, par_b)
assert len(a) == len(b), "a and b do not have the same length"
assert len(a) == len(par_a)
assert len(b) == len(par_b)
        # Make sure the two lists do not have common elements
a_b = [d for d in a if d in b and d > 0] # intersection of a and b
assert not a_b, 'The two inputs should not have common elements'
connected_dict = []
# Find now which labels should be merged
# mapped will be a list of 2d tuples. For example:
# If mapped = [(7,2), (7,5), (8,1)]
# it means that:
# label 7 and 2 should be considered the same
# label 7 and 5 should be considered the same, hence 2 and 5 are also the same
# label 8 and 1 should be considered the same
t = set([d for d in list(zip(a, b)) if 0 not in d])
mapped = list(zip(*t))
if mapped:
nlabels = np.array([a, b]).max()
mat = _to_csr_matrix(mapped[0], mapped[1], nlabels + 1)
n_components, labels = connected_components(csgraph=mat, directed=False, return_labels=True)
connected_labels = _get_connected_labels(labels)
_aa = []
_bb = []
for _list in connected_labels:
_a = [lookup_label_a[d] for d in _list if d in a]
_b = [lookup_label_b[d] for d in _list if d in b]
connected_dict.append({'a': _a, 'b': _b})
else:
connected_labels = []
connected_dict = []
# print(connected_labels)
return connected_dict
def _shift_labels(self, a, b):
# New labels must be negative
#
# Shifts the non-zero elements of a and b so that both lists a and b have values >= 0
# then shifts a only if a and b have common elements so that they do not intersect
# example: _shift_labels([2,1], [20,10]) gives:
# (array([2, 1]), array([20, 10]), {2: 2, 1: 1}, {20: 20, 10: 10})
# nothing really changes since the input data are well formed, hence no shift of any kind has to be taken
# For _shift_labels([-2,-1], [2,1]) then the output is
# (array([1, 2]), array([5, 4]), {1: -2, 2: -1}, {5: 2, 4: 1})
        # because [-2, -1] has to be shifted by 3 to become positive: [1, 2]. The same shift is also applied
# to the second list, [2, 1], which becomes [5, 4]
#
a = np.array(a)
b = np.array(b)
_a = a.copy()
_b = b.copy()
mina = min(a)
minb = min(b)
if mina < 0 or minb < 0:
k1 = abs(min(mina, minb)) + 1
else:
k1 = 0
a[a != 0] = a[a != 0] + k1
b[b != 0] = b[b != 0] + k1
# take the intersection
if np.any(np.in1d(a, b)):
a_b = a[np.in1d(a, b)]
a_b = a_b[a_b > 0]
else:
a_b = []
if np.any(a_b) & (np.any(a) & np.any(b)):
k2 = max([max(a), max(b)])
else:
k2 = 0
a[a > 0] = a[a > 0] + k2
# make a map to link the shifted labels with the original ones
assert len(a) == len(_a)
assert len(b) == len(_b)
rmap_a = {a[i]: _a[i] for i, d in enumerate(a)}
rmap_b = {b[i]: _b[i] for i, d in enumerate(b)}
return a, b, rmap_a, rmap_b
def collate_arrays(self, d):
arr = self.tile_topo(d)
stacked_rows = []
for row in arr:
row_temp = []
for id in row:
if np.isnan(id):
arr = np.zeros(self.tile_shape).astype(np.int32)
else:
id = id.astype(np.int32)
arr = self.tiles[id]['label_image'].toarray().astype(np.int32)
row_temp.append(arr)
stacked_rows.append(np.hstack(row_temp))
if len(stacked_rows) > 0:
rows = self._padded(stacked_rows) # <---- I THINK THIS IS NOT NEEDED ANYMORE
return np.vstack(rows)
else:
return np.array([])
def tile_topo(self, d):
a = np.arange(self.tiles_down * self.tiles_across).reshape((self.tiles_down, self.tiles_across))
mask = np.isin(a, d)
return np.where(mask, a, np.nan)[mask.any(axis=1)][:, mask.any(axis=0)]
def _padded(self, data):
dims = max([d.shape for d in data])
out = []
for d in data:
if d.shape != dims:
p = np.zeros(dims)
p[:d.shape[0], :d.shape[1]] = d
out.append(p)
else:
out.append(d)
return out
def find_offset(self, tile_ids):
sanity_check = np.array([self.tiles[d]['tile_id'] == d for d in tile_ids])
assert np.all(sanity_check)
offset_x = min([self.tiles[d]['tile_offset_x'] for d in tile_ids])
offset_y = min([self.tiles[d]['tile_offset_y'] for d in tile_ids])
return offset_x, offset_y
def assign_cell_id(self):
""" Add an extra column to be used as cell id
This should be made redundant. The label can be used instead.
"""
cell_id = self.cell_props.label - 1
cell_id[cell_id < 0] = np.nan
return cell_id
def writer(self, dirpath):
'''
save the data to the flatfile
:return:
'''
if os.path.exists(dirpath) and os.path.isdir(dirpath):
shutil.rmtree(dirpath)
os.mkdir(dirpath)
# 1. save the cell props
cell_props = self.cell_props.copy()
cell_props['x'] = cell_props.x.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['y'] = cell_props.y.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['cell_id'] = cell_props.cell_id.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cell_props['label'] = cell_props.label.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
cells_headers = ['cell_id', 'label', 'tile_id', 'area', 'x', 'y']
cell_props[cells_headers].to_csv(os.path.join(dirpath, '_cells.csv'), index=False)
# 2. save the cell coords
coords_headers = ['cell_id', 'label', 'coords']
cell_props[coords_headers].to_json(os.path.join(dirpath, '_cellCoords.json'), orient='records')
# 3. save the spots
spots_df = self.spots.copy()
spots_df['target'] = spots_df.Gene
spots_df['x_global'] = spots_df.x
spots_df['y_global'] = spots_df.y
spots_df['tile_id'] = spots_df.tile_id
spots_df['x_cell'] = spots_df.x_cell.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
spots_df['y_cell'] = spots_df.y_cell.fillna(-1).astype(int).astype('str').replace('-1', np.nan)
spots_headers = ['x_global', 'y_global', 'tile_id', 'label', 'target', 'x_cell', 'y_cell']
spots_df[spots_headers].to_csv(os.path.join(dirpath, '_spots.csv'), index=False)
logger.info('Total number of collected spots: %d' % spots_df.shape[0])
return cell_props[cells_headers], cell_props[coords_headers], spots_df[spots_headers] | en | 0.810018 | # replace the register # ---------------------------------------------------------------------------------------------------------------------- # ti Mutates in-place the tile object by adding two more key/value pairs Parameters ---------- spots_all: dataframe Contains all the spots (Gene names and x,y coords) for the full image # d['label_image'] = self.tile_label_image(i) # label_image for the i-th tile # spots for the i-th tile spots for the i-th tile # this is effectively the gene id # <-- DO NOT FORGET TO RESET THE INDEX Merge cells clipped by two or more tiles. Does most of the heavy lifting for cell merging. Mutates in-place the label_image arrays of three tiles. If tile has tile_id = i then the mutated label_images are for the tiles with: tile_id = i tile_id = i + 1 (the neighbouring tile at the right) tile_id = i - #tiles_across (the neighbouring tile at the top) Parameters ---------- tile: an instance of the class Fov Notes ----- Is is assumed that each tile is big enough (relative to the cells) so that there is no cell bigger in size that a tile. For example, the most complicated case will be a cell clipped by four tiles forming a 2x2 setup with the cell centroid close at the intersection of the four tiles # Bottom border of the label array above Compares the label_image arrays from two neighbouring (one next another) tiles. If the last column of the label_image at the left and the first column of the one at the right have non-zero values at the same location then the labels at these locations are assigned a new and common label Parameters ---------- adjc_tile: an instance of the class Fov The neighbouring tile. Could be the neighbour from the right, or from above tile: an instance of the class Fov the current tile transpose: bool. Optional if adjc_tile is the neighbour from the top, then set this to True. Default is False Returns ------- temp_a, temp_b: tuple A tuple of two label_image arrays that correspond to the adjacent and the current tile respectively # logger.info('tile_%d: label %d ---> label %d' % (adjc_tile['tile_id'], x, new_label)) # logger.info('tile_%d: label %d ---> label %d' % (tile['tile_id'], x, new_label)) # get a list from the dict values # Find the biggest non-positive value # m = list(set(m)) # label has already been reassigned. Give that to all merging cells and do not generate a new label. # I think m should contain the same elements anyway. If len(set(m)) > 1 then something went wrong?? compares two list-like input objects of the same size and returns the elements in ''par_a'' and ''par_b'' which are non-zero and have the same index position in both inputs Example connect_labels([0,0,0,2,2,2,4,7], [0,2,2,2,2,2,2,9]) returns [ {'a': [2, 4], 'b': [2]}, {'a': [7], 'b': [9]} ] which means that from the first arg the values 2 and 4 meet (have the same position in the array) with the value of 2 from the second arg. Also value 7 from the first arg has the same position with value 9 in the second arg. They are the last elements in both lists :param a: list :param b: list :return: # Make sure the two list do not have common elements # intersection of a and b # Find now which labels should be merged # mapped will be a list of 2d tuples. 
For example: # If mapped = [(7,2), (7,5), (8,1)] # it means that: # label 7 and 2 should be considered the same # label 7 and 5 should be considered the same, hence 2 and 5 are also the same # label 8 and 1 should be considered the same # print(connected_labels) # New labels must be negative # # Shifts the non-zero elements of a and b so that both lists a and b have values >= 0 # then shifts a only if a and b have common elements so that they do not intersect # example: _shift_labels([2,1], [20,10]) gives: # (array([2, 1]), array([20, 10]), {2: 2, 1: 1}, {20: 20, 10: 10}) # nothing really changes since the input data are well formed, hence no shift of any kind has to be taken # For _shift_labels([-2,-1], [2,1]) then the output is # (array([1, 2]), array([5, 4]), {1: -2, 2: -1}, {5: 2, 4: 1}) # because [-2, -1] has be be shifted by 3 to become positive: [1, 2]. The same shift is also applied # to the second list, [2, 1], which becomes [5, 4] # # take the intersection # make a map to link the shifted labels with the original ones # <---- I THINK THIS IS NOT NEEDED ANYMORE Add an extra column to be used as cell id This should be made redundant. The label can be used instead. save the data to the flatfile :return: # 1. save the cell props # 2. save the cell coords # 3. save the spots | 2.258586 | 2 |
check.py | nhmanas/amdgpu-pro-fans-gui | 0 | 6633069 | <reponame>nhmanas/amdgpu-pro-fans-gui
import subprocess
def temp():
return subprocess.run(["watch", "-n", "2", "sensors"]) | import subprocess
def temp():
return subprocess.run(["watch", "-n", "2", "sensors"]) | none | 1 | 1.90302 | 2 |
|
tan/experiments/trainer.py | leao1995/tan | 0 | 6633070 | import tensorflow as tf
import numpy as np
import os
TRAIN = 'train'
VALID = 'valid'
TEST = 'test'
# TODO: change name.
class RedTrainer:
"""Encapsulates the logic for training a sequence model.
Args:
fetchers: A dictionary of fetchers for training, validation, and testing
datasets-
{TRAIN: train_fetcher, VALID: valid_fetcher, TEST: test_fetcher}.
Each fetcher implements functions
...
loss: scalar, loss to optimize.
input_data: placeholder for N x d covariates
batch_size: int, size of batches for training.
sess: tf session to train in.
init_lr: scalar, initial learning rate.
lr_decay: scalar, multiplicative decay factor of learning rate.
decay_interval: int, number of batches between decay of learning rate.
min_lr: scalar, minimum value of learning rate to use.
penalty: scalar, multiplier to ridge penalty.
dropout_keeprate: scalar placeholder for dropout value
      dropout_keeprate_val: scalar in (0, 1], dropout keep rate used during training.
train_iters: int, number of batches to train model for.
      hold_iters: int, number of validation batches to use.
print_iters: int, print training stats (like loss) every print_iters
batches.
hold_interval: int, print validation stats every hold_intervals.
iters_pl: optional placeholder/tensor for iterations.
# TODO: what is this for?
iters_func:
optimizer_class: class of tf optimizer to use.
max_grad_norm: scalar, norm to clip gradients to.
do_check: boolean indicating whether to use check_ops for debugging.
momentum: Deprecated.
momentum_iter: Deprecated.
rms_decay: Deprecated.
rms_eps: Deprecated.
pretrain_scope: variable scope to match variables with re.match to
pretrain.
pretrain_iters: int, number of batches to pretrain for.
conditioning_data: placeholder of N x p extraneous covariates.
summary_log_path: string, path to save log files to.
save_path: string, path to save the graph to.
sampler:
input_sample:
nsamp:
samp_per_cond:
"""
def __init__(self, fetchers, loss, input_data, llikes,
batch_size=128, sess=None,
# Learning rate.
init_lr=0.1, lr_decay=0.9, decay_interval=10000, min_lr=None,
# Regularization.
penalty=0.0, dropout_keeprate=None, dropout_keeprate_val=1.0,
# Iteration configs.
# TODO: change hold_iter to valid_interval
# TODO: change iters to intervals
train_iters=100000, hold_iters=1000, print_iters=100,
hold_interval=1000,
iters_pl=None, iters_func=lambda x: x,
# Optimizer configs.
optimizer_class=tf.train.GradientDescentOptimizer,
max_grad_norm=None, do_check=False,
# Momentum.
# TODO: remove.
momentum=None, momentum_iter=1500, rms_decay=0.9,
rms_eps=1e-10,
# Pretraining.
pretrain_scope=None, pretrain_iters=5000,
# Conditioning.
conditioning_data=None,
# Saving.
summary_log_path=None, save_path=None,
# Sampling.
sampler=None, means=None, input_sample=False, nsamp=10, samp_per_cond=1):
self._input_data = input_data
self._conditioning_data = conditioning_data
# Training parameters.
self._train_iters = train_iters
self._valid_iters = hold_iters
self._print_iters = print_iters
self._hold_interval = hold_interval
self._lr_decay = lr_decay
self._decay_interval = decay_interval
self._batch_size = batch_size
self._iters_pl = iters_pl
self._iters_func = iters_func
# Make losses
self._llikes = llikes
self._loss_op = loss
if penalty > 0.0:
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self._loss_op += penalty * sum(reg_losses)
# Training operations.
self._lr = tf.Variable(init_lr, trainable=False)
self._optimizer = optimizer = optimizer_class(self._lr)
self.tvars = tvars = tf.trainable_variables()
grads_tvars = optimizer.compute_gradients(self._loss_op, tvars)
if max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(
[gt[0] for gt in grads_tvars], max_grad_norm)
grads_tvars = zip(grads, tvars)
self._train_op = optimizer.apply_gradients(grads_tvars)
if do_check:
check_op = tf.add_check_numerics_ops()
self._train_op = tf.group(self._train_op, check_op,
tf.check_numerics(self._loss_op, 'check'))
if pretrain_scope is not None and pretrain_iters is not None:
self._do_pretrain = True
self._pretrain_iters = pretrain_iters
self.ptvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
pretrain_scope)
ptgrads = tf.gradients(self._loss_op, self.ptvars)
if max_grad_norm is not None:
ptgrads, _ = tf.clip_by_global_norm(ptgrads, max_grad_norm)
self._pretrain_op = optimizer.apply_gradients(
zip(ptgrads, self.ptvars)
)
else:
self._do_pretrain = False
if momentum is not None:
# mom_optimizer = tf.train.MomentumOptimizer(self._lr, momentum)
mom_optimizer = tf.train.RMSPropOptimizer(
self._lr, momentum=momentum, decay=rms_decay, epsilon=rms_eps)
self._momentum_iter = momentum_iter
self._momentum_op = mom_optimizer.apply_gradients(grads_tvars)
# self._momentum_op = tf.group(
# mom_optimizer.apply_gradients(grads_tvars), self._train_op)
else:
self._momentum_op = None
self._momentum_iter = None
if min_lr is None:
self._lr_update = tf.assign(self._lr, self._lr * self._lr_decay)
else:
self._lr_update = tf.assign(
self._lr, tf.maximum(min_lr, self._lr * self._lr_decay))
# Make session if needed.
if sess is None:
sess = tf.Session()
self._sess = sess
# Set up fetchers.
self._dropout_keeprate = dropout_keeprate
self._dropout_keeprate_val = dropout_keeprate_val
self._fetchers = fetchers
# Sampling.
self._sampler = sampler
self._means = means
self._input_sample = input_sample
self._nsamps = nsamp
self._samp_per_cond = samp_per_cond
# Summarization variables.
self._summary_log_path = summary_log_path
self._average_pl = tf.placeholder(tf.float32, name='average_pl')
self._average_summary = tf.summary.scalar('average_loss',
self._average_pl)
if self._summary_log_path is not None:
self._train_writer, self._val_writer, self._test_writer = \
make_writers(self._summary_log_path, self._sess)
else:
self._train_writer, self._val_writer, self._test_writer = \
(None, None, None)
if save_path is not None:
self._saver = tf.train.Saver()
self._save_path = os.path.join(save_path, 'model.ckpt')
else:
self._saver = None
self._save_path = None
def update_lr(self):
self._sess.run(self._lr_update)
def _setup_feed_dict(self, batch, testing=False, iters=None):
if self._conditioning_data is None:
feed_dict = {self._input_data: batch}
else:
feed_dict = {self._input_data: batch[0],
self._conditioning_data: batch[1]}
if self._dropout_keeprate is not None:
if not testing:
feed_dict[self._dropout_keeprate] = self._dropout_keeprate_val
else:
feed_dict[self._dropout_keeprate] = 0.0
if self._iters_pl is not None:
if not testing and iters is not None:
feed_dict[self._iters_pl] = self._iters_func(iters)
else:
feed_dict[self._iters_pl] = self._iters_func(self._train_iters)
return feed_dict
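    # Sketch of the dict built above: during training with conditioning data, dropout
    # and an iters placeholder it has the form
    #   {input_data: X_batch, conditioning_data: C_batch,
    #    dropout_keeprate: dropout_keeprate_val, iters_pl: iters_func(i)},
    # while at test time the dropout entry is set to 0.0 as coded above.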
def _pretrain(self):
if not self._do_pretrain:
return True
for i in xrange(self._pretrain_iters):
# Decay the learning rate.
if i % self._decay_interval == 0:
if i > 0:
self.update_lr()
print('Iter: {} lrate: {}'.format(i, self._sess.run(self._lr)))
# Setup feed_dict.
batch = self._fetchers.train.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=False, iters=0)
# Print to screen and save summary.
if i % self._print_iters == 0:
train_loss, _ = self._sess.run(
(self._loss_op, self._pretrain_op), feed_dict=feed_dict)
print('Pretrain Iter: {} Train Loss: {}'.format(i, train_loss))
# Abort training if we have NaN loss
if np.isnan(train_loss):
return False
else:
self._sess.run(self._pretrain_op, feed_dict=feed_dict)
return True
def _save(self):
if self._saver is not None:
print('Saving {}...'.format(self._save_path))
self._saver.save(self._sess, self._save_path)
def _print_loss(self, i, loss, msg='Train Loss', writer=None):
print('Iter: {} {}: {}'.format(i, msg, loss))
if writer is not None:
writer.add_summary(
self._sess.run(self._average_summary,
feed_dict={self._average_pl: loss}), i
)
def main(self):
"""Runs the model on the given data.
Args:
summary_log_path: path to save tensorboard summaries.
save_path: path to save best validation set model.
print_iters: number of iterations to print to screen at.
Returns:
tuple of (best_validation_value, test_validation_value)
"""
self._sess.run(tf.global_variables_initializer())
# try to restore model
        print('trying to restore an existing model...')
try:
self.restore_model()
print('successfully restored, continue training.')
        except Exception:
print('failed to restore, training from scratch.')
# Pretrain if needed.
if not self._pretrain():
return {'loss': np.NaN, 'test_llks': None}
# Main train loop.
best_loss = None
train_operation = self._train_op
for i in xrange(self._train_iters):
# if i >= 2250:
# import pdb; pdb.set_trace() # XXX BREAKPOINT
# Decay the learning rate.
if i % self._decay_interval == 0:
if i > 0:
self.update_lr()
print('Iter: {} lrate: {}'.format(i, self._sess.run(self._lr)))
# Use a momentum operator if it is over the momentum iterations.
if self._momentum_op is not None and i == self._momentum_iter:
print('Using RMSProp with Momentum.')
train_operation = self._momentum_op
# Training.
batch = self._fetchers.train.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=False, iters=i)
# Print to screen and save summary.
if i % self._print_iters == 0:
train_loss, _ = self._sess.run(
(self._loss_op, train_operation), feed_dict=feed_dict
)
self._print_loss(i, train_loss, writer=self._train_writer)
# self._print_loss(i, train_loss, writer=None)
# Abort training if we have NaN loss
# TODO: use the last saved model with a lower learning rate?
if np.isnan(train_loss):
return {'loss': np.NaN, 'test_llks': None}
else:
self._sess.run(train_operation, feed_dict=feed_dict)
# Validation.
if i == 0 or i % self._hold_interval == 0 \
or i + 1 == self._train_iters:
                # Get validation value on the validation set.
valid_loss = self.validation_loss(i)
# If this is the best validation value, record and save model.
if best_loss is None or best_loss > valid_loss:
best_loss = valid_loss
self._save()
# Testing.
# Get validation value on test set.
test_llks = self.test_llikelihoods(load_saved_model=True)
print('Mean test nll {}'.format(-np.mean(test_llks)))
# Sample using best model.
if self._sampler is not None:
samples, samples_cond = self.sample(load_saved_model=True)
test_samples, test_means, test_samples_cond = self.sample_test(
load_saved_model=True)
return {'loss': best_loss, 'test_llks': test_llks,
'samples': samples, 'samples_cond': samples_cond,
'test_samples': test_samples, 'test_means': test_means,
'test_samples_cond': test_samples_cond}
return {'loss': best_loss, 'test_llks': test_llks}
def validation_loss(self, i):
loss = 0.0
for j in xrange(self._valid_iters):
batch = self._fetchers.validation.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=True)
loss_batch = -np.mean(
self._sess.run(self._llikes, feed_dict=feed_dict))
loss += loss_batch
loss = loss / self._valid_iters
if self._val_writer is not None:
self._val_writer.add_summary(
self._sess.run(self._average_summary,
feed_dict={self._average_pl: loss}), i
)
print('Validation nll: {}'.format(loss))
return loss
def restore_model(self):
if self._saver is not None and self._save_path is not None:
self._saver.restore(self._sess, self._save_path)
def test_llikelihoods(self, load_saved_model=False):
if load_saved_model:
self.restore_model()
test_list = []
try:
while True:
batch = self._fetchers.test.next_batch(self._batch_size)
# pad batch if needed
n = batch[0].shape[0]
padding = self._batch_size - n
if padding > 0:
batch = tuple(np.concatenate(
[d, np.zeros([padding] + list(d.shape[1:]),
dtype='float32')],
axis=0) for d in batch)
feed_dict = self._setup_feed_dict(batch, testing=True)
llikes = self._sess.run(self._llikes, feed_dict=feed_dict)
if padding > 0:
llikes = llikes[:n]
test_list += [llikes]
except IndexError:
self._fetchers.test.reset_index()
print('REACHED END')
test_list = np.concatenate(test_list, 0)
return test_list
def sample_test(self, load_saved_model=True):
if load_saved_model:
self.restore_model()
if self._dropout_keeprate is not None:
feed_dict = {self._dropout_keeprate: 0.0}
else:
feed_dict = None
samples = []
means = []
samples_cond = []
try:
while True:
batch = self._fetchers.test.next_batch(self._batch_size)
# pad batch if needed
n = batch[0].shape[0]
padding = self._batch_size - n
if padding > 0:
batch = tuple(np.concatenate(
[d, np.zeros([padding] + list(d.shape[1:]),
dtype='float32')],
axis=0) for d in batch)
samp_cond = batch[1]
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._conditioning_data] = samp_cond
if self._samp_per_cond == 1:
samp = self._sess.run(self._sampler, feed_dict=feed_dict)
else:
samp = []
for ci in range(self._samp_per_cond):
samp.append(
self._sess.run(self._sampler, feed_dict=feed_dict))
samp = np.stack(samp, 1)
mean = self._sess.run(self._means, feed_dict=feed_dict)
if padding > 0:
samp = samp[:n]
mean = mean[:n]
samp_cond = samp_cond[:n]
samples.append(samp)
means.append(mean)
samples_cond.append(samp_cond)
except IndexError:
self._fetchers.test.reset_index()
print('REACHED END')
samples = np.concatenate(samples, axis=0)
means = np.concatenate(means, axis=0)
samples_cond = np.concatenate(samples_cond, axis=0)
return samples, means, samples_cond
def sample(self, load_saved_model=False):
if load_saved_model:
self.restore_model()
samples = []
samples_cond = []
nsamp = int(self._sampler.get_shape()[0])
for si in range(self._nsamps):
cond_dict = {}
if self._dropout_keeprate is not None:
feed_dict = {self._dropout_keeprate: 0.0}
else:
feed_dict = None
batch = self._fetchers.validation.next_batch(nsamp)
if self._conditioning_data is not None:
# Get validation labels to condition on.
samp_cond = batch[1]
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._conditioning_data] = samp_cond
cond_dict['cond_val'] = samp_cond
if self._input_sample:
feed_dict[self._input_data] = batch[0]
cond_dict['inp_val'] = batch[0]
elif self._input_sample:
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._input_data] = batch
cond_dict['inp_val'] = batch
samples_cond.append(cond_dict)
if self._samp_per_cond == 1:
samp = self._sess.run(self._sampler, feed_dict=feed_dict)
else:
samp = []
for ci in range(self._samp_per_cond):
samp.append(
self._sess.run(self._sampler, feed_dict=feed_dict))
samp = np.stack(samp, 1)
samples.append(samp)
samples = np.concatenate(samples, 0)
# if len(samples_cond) > 0:
# return samples, np.concatenate(samples_cond, 0)
# return samples
return samples, samples_cond
def make_writers(summary_log_path, sess):
train_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, TRAIN), sess.graph
)
val_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, VALID), sess.graph
)
test_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, TEST), sess.graph
)
return train_writer, val_writer, test_writer
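# Usage sketch (minimal, with placeholder names): `my_fetchers`, `build_loss_and_llikes`
# and `dim` stand in for project-specific setup and are not defined in this module.
#
#   x_pl = tf.placeholder(tf.float32, [None, dim], name='x')
#   loss, llikes = build_loss_and_llikes(x_pl)   # placeholder helper returning tensors
#   trainer = RedTrainer(
#       my_fetchers, loss, x_pl, llikes,
#       batch_size=128, init_lr=0.005, lr_decay=0.5, decay_interval=5000,
#       train_iters=20000, hold_iters=100, hold_interval=1000,
#       max_grad_norm=1.0, summary_log_path='/tmp/logs', save_path='/tmp/ckpt')
#   results = trainer.main()   # e.g. {'loss': ..., 'test_llks': ...}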
| import tensorflow as tf
import numpy as np
import os
TRAIN = 'train'
VALID = 'valid'
TEST = 'test'
# TODO: change name.
class RedTrainer:
"""Encapsulates the logic for training a sequence model.
Args:
fetchers: A dictionary of fetchers for training, validation, and testing
datasets-
{TRAIN: train_fetcher, VALID: valid_fetcher, TEST: test_fetcher}.
Each fetcher implements functions
...
loss: scalar, loss to optimize.
input_data: placeholder for N x d covariates
batch_size: int, size of batches for training.
sess: tf session to train in.
init_lr: scalar, initial learning rate.
lr_decay: scalar, multiplicative decay factor of learning rate.
decay_interval: int, number of batches between decay of learning rate.
min_lr: scalar, minimum value of learning rate to use.
penalty: scalar, multiplier to ridge penalty.
dropout_keeprate: scalar placeholder for dropout value
      dropout_keeprate_val: real in (0, 1], dropout keep probability used during training.
train_iters: int, number of batches to train model for.
      hold_iters: int, number of validation batches to use.
print_iters: int, print training stats (like loss) every print_iters
batches.
hold_interval: int, print validation stats every hold_intervals.
iters_pl: optional placeholder/tensor for iterations.
# TODO: what is this for?
iters_func:
optimizer_class: class of tf optimizer to use.
max_grad_norm: scalar, norm to clip gradients to.
do_check: boolean indicating whether to use check_ops for debugging.
momentum: Deprecated.
momentum_iter: Deprecated.
rms_decay: Deprecated.
rms_eps: Deprecated.
pretrain_scope: variable scope to match variables with re.match to
pretrain.
pretrain_iters: int, number of batches to pretrain for.
conditioning_data: placeholder of N x p extraneous covariates.
summary_log_path: string, path to save log files to.
save_path: string, path to save the graph to.
sampler:
input_sample:
nsamp:
samp_per_cond:
"""
def __init__(self, fetchers, loss, input_data, llikes,
batch_size=128, sess=None,
# Learning rate.
init_lr=0.1, lr_decay=0.9, decay_interval=10000, min_lr=None,
# Regularization.
penalty=0.0, dropout_keeprate=None, dropout_keeprate_val=1.0,
# Iteration configs.
# TODO: change hold_iter to valid_interval
# TODO: change iters to intervals
train_iters=100000, hold_iters=1000, print_iters=100,
hold_interval=1000,
iters_pl=None, iters_func=lambda x: x,
# Optimizer configs.
optimizer_class=tf.train.GradientDescentOptimizer,
max_grad_norm=None, do_check=False,
# Momentum.
# TODO: remove.
momentum=None, momentum_iter=1500, rms_decay=0.9,
rms_eps=1e-10,
# Pretraining.
pretrain_scope=None, pretrain_iters=5000,
# Conditioning.
conditioning_data=None,
# Saving.
summary_log_path=None, save_path=None,
# Sampling.
sampler=None, means=None, input_sample=False, nsamp=10, samp_per_cond=1):
self._input_data = input_data
self._conditioning_data = conditioning_data
# Training parameters.
self._train_iters = train_iters
self._valid_iters = hold_iters
self._print_iters = print_iters
self._hold_interval = hold_interval
self._lr_decay = lr_decay
self._decay_interval = decay_interval
self._batch_size = batch_size
self._iters_pl = iters_pl
self._iters_func = iters_func
# Make losses
self._llikes = llikes
self._loss_op = loss
if penalty > 0.0:
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self._loss_op += penalty * sum(reg_losses)
# Training operations.
self._lr = tf.Variable(init_lr, trainable=False)
self._optimizer = optimizer = optimizer_class(self._lr)
self.tvars = tvars = tf.trainable_variables()
grads_tvars = optimizer.compute_gradients(self._loss_op, tvars)
if max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(
[gt[0] for gt in grads_tvars], max_grad_norm)
grads_tvars = zip(grads, tvars)
self._train_op = optimizer.apply_gradients(grads_tvars)
if do_check:
check_op = tf.add_check_numerics_ops()
self._train_op = tf.group(self._train_op, check_op,
tf.check_numerics(self._loss_op, 'check'))
if pretrain_scope is not None and pretrain_iters is not None:
self._do_pretrain = True
self._pretrain_iters = pretrain_iters
self.ptvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
pretrain_scope)
ptgrads = tf.gradients(self._loss_op, self.ptvars)
if max_grad_norm is not None:
ptgrads, _ = tf.clip_by_global_norm(ptgrads, max_grad_norm)
self._pretrain_op = optimizer.apply_gradients(
zip(ptgrads, self.ptvars)
)
else:
self._do_pretrain = False
if momentum is not None:
# mom_optimizer = tf.train.MomentumOptimizer(self._lr, momentum)
mom_optimizer = tf.train.RMSPropOptimizer(
self._lr, momentum=momentum, decay=rms_decay, epsilon=rms_eps)
self._momentum_iter = momentum_iter
self._momentum_op = mom_optimizer.apply_gradients(grads_tvars)
# self._momentum_op = tf.group(
# mom_optimizer.apply_gradients(grads_tvars), self._train_op)
else:
self._momentum_op = None
self._momentum_iter = None
if min_lr is None:
self._lr_update = tf.assign(self._lr, self._lr * self._lr_decay)
else:
self._lr_update = tf.assign(
self._lr, tf.maximum(min_lr, self._lr * self._lr_decay))
# Make session if needed.
if sess is None:
sess = tf.Session()
self._sess = sess
# Set up fetchers.
self._dropout_keeprate = dropout_keeprate
self._dropout_keeprate_val = dropout_keeprate_val
self._fetchers = fetchers
# Sampling.
self._sampler = sampler
self._means = means
self._input_sample = input_sample
self._nsamps = nsamp
self._samp_per_cond = samp_per_cond
# Summarization variables.
self._summary_log_path = summary_log_path
self._average_pl = tf.placeholder(tf.float32, name='average_pl')
self._average_summary = tf.summary.scalar('average_loss',
self._average_pl)
if self._summary_log_path is not None:
self._train_writer, self._val_writer, self._test_writer = \
make_writers(self._summary_log_path, self._sess)
else:
self._train_writer, self._val_writer, self._test_writer = \
(None, None, None)
if save_path is not None:
self._saver = tf.train.Saver()
self._save_path = os.path.join(save_path, 'model.ckpt')
else:
self._saver = None
self._save_path = None
def update_lr(self):
self._sess.run(self._lr_update)
def _setup_feed_dict(self, batch, testing=False, iters=None):
if self._conditioning_data is None:
feed_dict = {self._input_data: batch}
else:
feed_dict = {self._input_data: batch[0],
self._conditioning_data: batch[1]}
if self._dropout_keeprate is not None:
if not testing:
feed_dict[self._dropout_keeprate] = self._dropout_keeprate_val
else:
feed_dict[self._dropout_keeprate] = 0.0
if self._iters_pl is not None:
if not testing and iters is not None:
feed_dict[self._iters_pl] = self._iters_func(iters)
else:
feed_dict[self._iters_pl] = self._iters_func(self._train_iters)
return feed_dict
def _pretrain(self):
if not self._do_pretrain:
return True
for i in xrange(self._pretrain_iters):
# Decay the learning rate.
if i % self._decay_interval == 0:
if i > 0:
self.update_lr()
print('Iter: {} lrate: {}'.format(i, self._sess.run(self._lr)))
# Setup feed_dict.
batch = self._fetchers.train.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=False, iters=0)
# Print to screen and save summary.
if i % self._print_iters == 0:
train_loss, _ = self._sess.run(
(self._loss_op, self._pretrain_op), feed_dict=feed_dict)
print('Pretrain Iter: {} Train Loss: {}'.format(i, train_loss))
# Abort training if we have NaN loss
if np.isnan(train_loss):
return False
else:
self._sess.run(self._pretrain_op, feed_dict=feed_dict)
return True
def _save(self):
if self._saver is not None:
print('Saving {}...'.format(self._save_path))
self._saver.save(self._sess, self._save_path)
def _print_loss(self, i, loss, msg='Train Loss', writer=None):
print('Iter: {} {}: {}'.format(i, msg, loss))
if writer is not None:
writer.add_summary(
self._sess.run(self._average_summary,
feed_dict={self._average_pl: loss}), i
)
def main(self):
"""Runs the model on the given data.
Args:
summary_log_path: path to save tensorboard summaries.
save_path: path to save best validation set model.
print_iters: number of iterations to print to screen at.
Returns:
tuple of (best_validation_value, test_validation_value)
"""
self._sess.run(tf.global_variables_initializer())
# try to restore model
        print('trying to restore an existing model...')
try:
self.restore_model()
print('successfully restored, continue training.')
        except Exception:
print('failed to restore, training from scratch.')
# Pretrain if needed.
if not self._pretrain():
return {'loss': np.NaN, 'test_llks': None}
# Main train loop.
best_loss = None
train_operation = self._train_op
for i in xrange(self._train_iters):
# if i >= 2250:
# import pdb; pdb.set_trace() # XXX BREAKPOINT
# Decay the learning rate.
if i % self._decay_interval == 0:
if i > 0:
self.update_lr()
print('Iter: {} lrate: {}'.format(i, self._sess.run(self._lr)))
# Use a momentum operator if it is over the momentum iterations.
if self._momentum_op is not None and i == self._momentum_iter:
print('Using RMSProp with Momentum.')
train_operation = self._momentum_op
# Training.
batch = self._fetchers.train.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=False, iters=i)
# Print to screen and save summary.
if i % self._print_iters == 0:
train_loss, _ = self._sess.run(
(self._loss_op, train_operation), feed_dict=feed_dict
)
self._print_loss(i, train_loss, writer=self._train_writer)
# self._print_loss(i, train_loss, writer=None)
# Abort training if we have NaN loss
# TODO: use the last saved model with a lower learning rate?
if np.isnan(train_loss):
return {'loss': np.NaN, 'test_llks': None}
else:
self._sess.run(train_operation, feed_dict=feed_dict)
# Validation.
if i == 0 or i % self._hold_interval == 0 \
or i + 1 == self._train_iters:
                # Get validation value on the validation set.
valid_loss = self.validation_loss(i)
# If this is the best validation value, record and save model.
if best_loss is None or best_loss > valid_loss:
best_loss = valid_loss
self._save()
# Testing.
# Get validation value on test set.
test_llks = self.test_llikelihoods(load_saved_model=True)
print('Mean test nll {}'.format(-np.mean(test_llks)))
# Sample using best model.
if self._sampler is not None:
samples, samples_cond = self.sample(load_saved_model=True)
test_samples, test_means, test_samples_cond = self.sample_test(
load_saved_model=True)
return {'loss': best_loss, 'test_llks': test_llks,
'samples': samples, 'samples_cond': samples_cond,
'test_samples': test_samples, 'test_means': test_means,
'test_samples_cond': test_samples_cond}
return {'loss': best_loss, 'test_llks': test_llks}
def validation_loss(self, i):
loss = 0.0
for j in xrange(self._valid_iters):
batch = self._fetchers.validation.next_batch(self._batch_size)
feed_dict = self._setup_feed_dict(batch, testing=True)
loss_batch = -np.mean(
self._sess.run(self._llikes, feed_dict=feed_dict))
loss += loss_batch
loss = loss / self._valid_iters
if self._val_writer is not None:
self._val_writer.add_summary(
self._sess.run(self._average_summary,
feed_dict={self._average_pl: loss}), i
)
print('Validation nll: {}'.format(loss))
return loss
def restore_model(self):
if self._saver is not None and self._save_path is not None:
self._saver.restore(self._sess, self._save_path)
def test_llikelihoods(self, load_saved_model=False):
if load_saved_model:
self.restore_model()
test_list = []
try:
while True:
batch = self._fetchers.test.next_batch(self._batch_size)
# pad batch if needed
n = batch[0].shape[0]
padding = self._batch_size - n
if padding > 0:
batch = tuple(np.concatenate(
[d, np.zeros([padding] + list(d.shape[1:]),
dtype='float32')],
axis=0) for d in batch)
feed_dict = self._setup_feed_dict(batch, testing=True)
llikes = self._sess.run(self._llikes, feed_dict=feed_dict)
if padding > 0:
llikes = llikes[:n]
test_list += [llikes]
except IndexError:
self._fetchers.test.reset_index()
print('REACHED END')
test_list = np.concatenate(test_list, 0)
return test_list
def sample_test(self, load_saved_model=True):
if load_saved_model:
self.restore_model()
if self._dropout_keeprate is not None:
feed_dict = {self._dropout_keeprate: 0.0}
else:
feed_dict = None
samples = []
means = []
samples_cond = []
try:
while True:
batch = self._fetchers.test.next_batch(self._batch_size)
# pad batch if needed
n = batch[0].shape[0]
padding = self._batch_size - n
if padding > 0:
batch = tuple(np.concatenate(
[d, np.zeros([padding] + list(d.shape[1:]),
dtype='float32')],
axis=0) for d in batch)
samp_cond = batch[1]
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._conditioning_data] = samp_cond
if self._samp_per_cond == 1:
samp = self._sess.run(self._sampler, feed_dict=feed_dict)
else:
samp = []
for ci in range(self._samp_per_cond):
samp.append(
self._sess.run(self._sampler, feed_dict=feed_dict))
samp = np.stack(samp, 1)
mean = self._sess.run(self._means, feed_dict=feed_dict)
if padding > 0:
samp = samp[:n]
mean = mean[:n]
samp_cond = samp_cond[:n]
samples.append(samp)
means.append(mean)
samples_cond.append(samp_cond)
except IndexError:
self._fetchers.test.reset_index()
print('REACHED END')
samples = np.concatenate(samples, axis=0)
means = np.concatenate(means, axis=0)
samples_cond = np.concatenate(samples_cond, axis=0)
return samples, means, samples_cond
def sample(self, load_saved_model=False):
if load_saved_model:
self.restore_model()
samples = []
samples_cond = []
nsamp = int(self._sampler.get_shape()[0])
for si in range(self._nsamps):
cond_dict = {}
if self._dropout_keeprate is not None:
feed_dict = {self._dropout_keeprate: 0.0}
else:
feed_dict = None
batch = self._fetchers.validation.next_batch(nsamp)
if self._conditioning_data is not None:
# Get validation labels to condition on.
samp_cond = batch[1]
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._conditioning_data] = samp_cond
cond_dict['cond_val'] = samp_cond
if self._input_sample:
feed_dict[self._input_data] = batch[0]
cond_dict['inp_val'] = batch[0]
elif self._input_sample:
feed_dict = {} if feed_dict is None else feed_dict
feed_dict[self._input_data] = batch
cond_dict['inp_val'] = batch
samples_cond.append(cond_dict)
if self._samp_per_cond == 1:
samp = self._sess.run(self._sampler, feed_dict=feed_dict)
else:
samp = []
for ci in range(self._samp_per_cond):
samp.append(
self._sess.run(self._sampler, feed_dict=feed_dict))
samp = np.stack(samp, 1)
samples.append(samp)
samples = np.concatenate(samples, 0)
# if len(samples_cond) > 0:
# return samples, np.concatenate(samples_cond, 0)
# return samples
return samples, samples_cond
def make_writers(summary_log_path, sess):
train_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, TRAIN), sess.graph
)
val_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, VALID), sess.graph
)
test_writer = tf.summary.FileWriter(
os.path.join(summary_log_path, TEST), sess.graph
)
return train_writer, val_writer, test_writer
| en | 0.678481 | # TODO: chance name. Encapsulates the logic for training a sequence model. Args: fetchers: A dictionary of fetchers for training, validation, and testing datasets- {TRAIN: train_fetcher, VALID: valid_fetcher, TEST: test_fetcher}. Each fetcher implements functions ... loss: scalar, loss to optimize. input_data: placeholder for N x d covariates batch_size: int, size of batches for training. sess: tf session to train in. init_lr: scalar, initial learning rate. lr_decay: scalar, multiplicative decay factor of learning rate. decay_interval: int, number of batches between decay of learning rate. min_lr: scalar, minimum value of learning rate to use. penalty: scalar, multiplier to ridge penalty. dropout_keeprate: scalar placeholder for dropout value dropout_keeprate_val: real 0< <=1 of kept dropout rate for training train_iters: int, number of batches to train model for. hold_iters: int, number validation batches to use. print_iters: int, print training stats (like loss) every print_iters batches. hold_interval: int, print validation stats every hold_intervals. iters_pl: optional placeholder/tensor for iterations. # TODO: what is this for? iters_func: optimizer_class: class of tf optimizer to use. max_grad_norm: scalar, norm to clip gradients to. do_check: boolean indicating whether to use check_ops for debugging. momentum: Deprecated. momentum_iter: Deprecated. rms_decay: Deprecated. rms_eps: Deprecated. pretrain_scope: variable scope to match variables with re.match to pretrain. pretrain_iters: int, number of batches to pretrain for. conditioning_data: placeholder of N x p extraneous covariates. summary_log_path: string, path to save log files to. save_path: string, path to save the graph to. sampler: input_sample: nsamp: samp_per_cond: # Learning rate. # Regularization. # Iteration configs. # TODO: change hold_iter to valid_interval # TODO: change iters to intervals # Optimizer configs. # Momentum. # TODO: remove. # Pretraining. # Conditioning. # Saving. # Sampling. # Training parameters. # Make losses # Training operations. # mom_optimizer = tf.train.MomentumOptimizer(self._lr, momentum) # self._momentum_op = tf.group( # mom_optimizer.apply_gradients(grads_tvars), self._train_op) # Make session if needed. # Set up fetchers. # Sampling. # Summarization variables. # Decay the learning rate. # Setup feed_dict. # Print to screen and save summary. # Abort training if we have NaN loss Runs the model on the given data. Args: summary_log_path: path to save tensorboard summaries. save_path: path to save best validation set model. print_iters: number of iterations to print to screen at. Returns: tuple of (best_validation_value, test_validation_value) # try to restore model # Pretrain if needed. # Main train loop. # if i >= 2250: # import pdb; pdb.set_trace() # XXX BREAKPOINT # Decay the learning rate. # Use a momentum operator if it is over the momentum iterations. # Training. # Print to screen and save summary. # self._print_loss(i, train_loss, writer=None) # Abort training if we have NaN loss # TODO: use the last saved model with a lower learning rate? # Validation. # Get validation validation value on validation set. # If this is the best validation value, record and save model. # Testing. # Get validation value on test set. # Sample using best model. # pad batch if needed # pad batch if needed # Get validation labels to condition on. # if len(samples_cond) > 0: # return samples, np.concatenate(samples_cond, 0) # return samples | 3.054976 | 3 |
blender-processing-scripts/render/asset_thumbnail.py | MikeFesta/3xr | 7 | 6633071 | # SPDX-License-Identifier: Apache-2.0
import bpy
import math
import os
import time
from xrs import automate as xra
xra.log_info('Rendering Asset Thumbnail')
arguments = xra.get_command_line_arguments()
working_dir = arguments[0]
asset_name = arguments[1]
asset_blend = working_dir + asset_name + '.blend'
xra.log_info('Linking asset from ' + asset_blend)
xra.link_collection(asset_blend, "web")
if ("web" not in bpy.data.collections):
# Exit if the collection couldn't be loaded
xra.quit_with_error("Unable to load web collection")
# Relink the textures to the current directory
xra.relink_textures(working_dir)
target_height = bpy.data.collections["web"].objects[0].dimensions.z
# Render Engine Setup (Note: Eevee not supported headless)
#https://devtalk.blender.org/t/blender-2-8-unable-to-open-a-display-by-the-rendering-on-the-background-eevee/1436/24
xra.log_info('Setting Render Engine to Cycles')
xra.set_renderer_to_cycles(64) #TODO: experiment with this number
xra.set_render_resolution(1024, 1024)
# Image Save Location
xra.log_info('Setting Image Save Location')
bpy.context.scene.render.filepath = working_dir + asset_name + "-1k.jpg"
bpy.context.scene.render.image_settings.file_format = "JPEG"
xra.log_info(bpy.context.scene.render.filepath)
# Camera
bpy.ops.object.camera_add()
bpy.context.scene.camera = bpy.context.active_object
xra.point_camera_at_object_from_angle(
bpy.context.scene.camera,
bpy.data.collections["web"].objects[0], #TODO: Multiple Objects
-math.pi/6,
-math.pi/8
)
# Lighting Setup
xra.set_hdr_lighting("bw_lebombo_1k", 0.25)
xra.place_light_above_camera(bpy.data.objects["Point"], bpy.context.scene.camera, target_height)
xra.set_light_strength(bpy.data.objects["Point"], target_height * 30)
# Scale the Backdrop and light strength
xra.scale_object_uniform(bpy.data.objects['Backdrop'], target_height)
# Render Image
xra.log_info('Starting Render')
timer = time.time()
bpy.ops.render.render()
# Save Image
bpy.data.images["Render Result"].save_render(filepath=bpy.context.scene.render.filepath)
xra.log_info("Render Thumbnail Time: " + str(time.time() - timer) + " seconds")
| # SPDX-License-Identifier: Apache-2.0
import bpy
import math
import os
import time
from xrs import automate as xra
xra.log_info('Rendering Asset Thumbnail')
arguments = xra.get_command_line_arguments()
working_dir = arguments[0]
asset_name = arguments[1]
asset_blend = working_dir + asset_name + '.blend'
xra.log_info('Linking asset from ' + asset_blend)
xra.link_collection(asset_blend, "web")
if ("web" not in bpy.data.collections):
# Exit if the collection couldn't be loaded
xra.quit_with_error("Unable to load web collection")
# Relink the textures to the current directory
xra.relink_textures(working_dir)
target_height = bpy.data.collections["web"].objects[0].dimensions.z
# Render Engine Setup (Note: Eevee not supported headless)
#https://devtalk.blender.org/t/blender-2-8-unable-to-open-a-display-by-the-rendering-on-the-background-eevee/1436/24
xra.log_info('Setting Render Engine to Cycles')
xra.set_renderer_to_cycles(64) #TODO: experiment with this number
xra.set_render_resolution(1024, 1024)
# Image Save Location
xra.log_info('Setting Image Save Location')
bpy.context.scene.render.filepath = working_dir + asset_name + "-1k.jpg"
bpy.context.scene.render.image_settings.file_format = "JPEG"
xra.log_info(bpy.context.scene.render.filepath)
# Camera
bpy.ops.object.camera_add()
bpy.context.scene.camera = bpy.context.active_object
xra.point_camera_at_object_from_angle(
bpy.context.scene.camera,
bpy.data.collections["web"].objects[0], #TODO: Multiple Objects
-math.pi/6,
-math.pi/8
)
# Lighting Setup
xra.set_hdr_lighting("bw_lebombo_1k", 0.25)
xra.place_light_above_camera(bpy.data.objects["Point"], bpy.context.scene.camera, target_height)
xra.set_light_strength(bpy.data.objects["Point"], target_height * 30)
# Scale the Backdrop and light strength
xra.scale_object_uniform(bpy.data.objects['Backdrop'], target_height)
# Render Image
xra.log_info('Starting Render')
timer = time.time()
bpy.ops.render.render()
# Save Image
bpy.data.images["Render Result"].save_render(filepath=bpy.context.scene.render.filepath)
xra.log_info("Render Thumbnail Time: " + str(time.time() - timer) + " seconds")
| en | 0.638381 | # SPDX-License-Identifier: Apache-2.0 # Exit if the collection couldn't be loaded # Relink the textures to the current directory # Render Engine Setup (Note: Eevee not supported headless) #https://devtalk.blender.org/t/blender-2-8-unable-to-open-a-display-by-the-rendering-on-the-background-eevee/1436/24 #TODO: experiment with this number # Image Save Location # Camera #TODO: Multiple Objects # Lighting Setup # Scale the Backdrop and light strength # Render Image # Save Image | 2.108905 | 2 |
tests/unit/flow/test_flow_except.py | yk/jina | 1 | 6633072 | import numpy as np
from jina.executors.crafters import BaseCrafter
from jina.flow import Flow
from jina.proto import jina_pb2
class DummyCrafter(BaseCrafter):
def craft(self, *args, **kwargs):
return 1 / 0
def test_bad_flow(mocker):
def validate(req):
bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert req.status.code == jina_pb2.StatusProto.ERROR
assert bad_routes[0].pod == 'r1'
f = (Flow().add(name='r1', uses='!BaseCrafter')
.add(name='r2', uses='!BaseEncoder')
.add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_bad_flow_customized(mocker):
def validate(req):
bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert req.status.code == jina_pb2.StatusProto.ERROR
assert bad_routes[0].pod == 'r2'
assert bad_routes[0].status.exception.name == 'ZeroDivisionError'
f = (Flow().add(name='r1')
.add(name='r2', uses='!DummyCrafter')
.add(name='r3', uses='!BaseEncoder'))
with f:
pass
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_except_with_parallel(mocker):
def validate(req):
assert req.status.code == jina_pb2.StatusProto.ERROR
err_routes = [r.status for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert len(err_routes) == 2
assert err_routes[0].exception.executor == 'DummyCrafter'
assert err_routes[1].exception.executor == 'BaseEncoder'
assert err_routes[0].exception.name == 'ZeroDivisionError'
assert err_routes[1].exception.name == 'NotImplementedError'
f = (Flow().add(name='r1')
.add(name='r2', uses='!DummyCrafter', parallel=3)
.add(name='r3', uses='!BaseEncoder'))
with f:
pass
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_on_error_callback(mocker):
def validate1():
raise NotImplementedError
def validate2(x, *args):
x = x.routes
assert len(x) == 4 # gateway, r1, r3, gateway
badones = [r for r in x if r.status.code == jina_pb2.StatusProto.ERROR]
assert badones[0].pod == 'r3'
f = (Flow().add(name='r1')
.add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wraps=validate2)
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_done=validate1, on_error=on_error_mock)
on_error_mock.assert_called()
def test_no_error_callback(mocker):
def validate2():
raise NotImplementedError
def validate1(x, *args):
pass
f = (Flow().add(name='r1')
.add(name='r3'))
    response_mock = mocker.Mock(wraps=validate1)
    on_error_mock = mocker.Mock(wraps=validate2)
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_done=response_mock, on_error=on_error_mock)
response_mock.assert_called()
on_error_mock.assert_not_called()
def test_flow_on_callback():
f = Flow().add()
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(np.random.random([10, 10]),
on_done=f1, on_error=f2, on_always=f3)
assert hit == ['done', 'always']
hit.clear()
def test_flow_on_error_callback():
class DummyCrafter(BaseCrafter):
def craft(self, *args, **kwargs):
raise NotImplementedError
f = Flow().add(uses='DummyCrafter')
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(np.random.random([10, 10]),
on_done=f1, on_error=f2, on_always=f3)
assert hit == ['error', 'always']
hit.clear()
| import numpy as np
from jina.executors.crafters import BaseCrafter
from jina.flow import Flow
from jina.proto import jina_pb2
class DummyCrafter(BaseCrafter):
def craft(self, *args, **kwargs):
return 1 / 0
def test_bad_flow(mocker):
def validate(req):
bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert req.status.code == jina_pb2.StatusProto.ERROR
assert bad_routes[0].pod == 'r1'
f = (Flow().add(name='r1', uses='!BaseCrafter')
.add(name='r2', uses='!BaseEncoder')
.add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_bad_flow_customized(mocker):
def validate(req):
bad_routes = [r for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert req.status.code == jina_pb2.StatusProto.ERROR
assert bad_routes[0].pod == 'r2'
assert bad_routes[0].status.exception.name == 'ZeroDivisionError'
f = (Flow().add(name='r1')
.add(name='r2', uses='!DummyCrafter')
.add(name='r3', uses='!BaseEncoder'))
with f:
pass
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_except_with_parallel(mocker):
def validate(req):
assert req.status.code == jina_pb2.StatusProto.ERROR
err_routes = [r.status for r in req.routes if r.status.code == jina_pb2.StatusProto.ERROR]
assert len(err_routes) == 2
assert err_routes[0].exception.executor == 'DummyCrafter'
assert err_routes[1].exception.executor == 'BaseEncoder'
assert err_routes[0].exception.name == 'ZeroDivisionError'
assert err_routes[1].exception.name == 'NotImplementedError'
f = (Flow().add(name='r1')
.add(name='r2', uses='!DummyCrafter', parallel=3)
.add(name='r3', uses='!BaseEncoder'))
with f:
pass
    on_error_mock = mocker.Mock(wraps=validate)
    on_error_mock_2 = mocker.Mock(wraps=validate)
# always test two times, make sure the flow still works after it fails on the first
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock)
f.index_lines(lines=['abbcs', 'efgh'], on_error=on_error_mock_2)
on_error_mock.assert_called()
on_error_mock_2.assert_called()
def test_on_error_callback(mocker):
def validate1():
raise NotImplementedError
def validate2(x, *args):
x = x.routes
assert len(x) == 4 # gateway, r1, r3, gateway
badones = [r for r in x if r.status.code == jina_pb2.StatusProto.ERROR]
assert badones[0].pod == 'r3'
f = (Flow().add(name='r1')
.add(name='r3', uses='!BaseEncoder'))
    on_error_mock = mocker.Mock(wraps=validate2)
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_done=validate1, on_error=on_error_mock)
on_error_mock.assert_called()
def test_no_error_callback(mocker):
def validate2():
raise NotImplementedError
def validate1(x, *args):
pass
f = (Flow().add(name='r1')
.add(name='r3'))
    response_mock = mocker.Mock(wraps=validate1)
    on_error_mock = mocker.Mock(wraps=validate2)
with f:
f.index_lines(lines=['abbcs', 'efgh'], on_done=response_mock, on_error=on_error_mock)
response_mock.assert_called()
on_error_mock.assert_not_called()
def test_flow_on_callback():
f = Flow().add()
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(np.random.random([10, 10]),
on_done=f1, on_error=f2, on_always=f3)
assert hit == ['done', 'always']
hit.clear()
def test_flow_on_error_callback():
class DummyCrafter(BaseCrafter):
def craft(self, *args, **kwargs):
raise NotImplementedError
f = Flow().add(uses='DummyCrafter')
hit = []
def f1(*args):
hit.append('done')
def f2(*args):
hit.append('error')
def f3(*args):
hit.append('always')
with f:
f.index(np.random.random([10, 10]),
on_done=f1, on_error=f2, on_always=f3)
assert hit == ['error', 'always']
hit.clear()
| en | 0.959779 | # always test two times, make sure the flow still works after it fails on the first # always test two times, make sure the flow still works after it fails on the first # always test two times, make sure the flow still works after it fails on the first # gateway, r1, r3, gateway | 2.251573 | 2 |
metabolite_database/main/forms.py | lparsons/metabolite_database | 0 | 6633073 | from flask_wtf import FlaskForm
from wtforms import SelectField, SelectMultipleField, SubmitField, widgets
from wtforms.validators import DataRequired, InputRequired
class MultiCheckboxField(SelectMultipleField):
"""
A multiple-select, except displays a list of checkboxes.
Iterating the field will produce subfields, allowing custom rendering of
the enclosed checkbox fields.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class RetentionTimesForm(FlaskForm):
compoundlist = SelectField('Compound List', coerce=int,
validators=[InputRequired()])
standardruns = MultiCheckboxField(
'Standard Runs', coerce=int, validators=[DataRequired()])
submit = SubmitField('Get List')
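# Usage sketch: `CompoundList` and `StandardRun` are placeholder model classes, not
# defined in this module; a view would normally fill in the choices as (value, label)
# pairs before validating:
#
#   form = RetentionTimesForm()
#   form.compoundlist.choices = [(c.id, c.name) for c in CompoundList.query.all()]
#   form.standardruns.choices = [(r.id, r.name) for r in StandardRun.query.all()]
#   if form.validate_on_submit():
#       compound_list_id = form.compoundlist.data   # int, via coerce=int
#       standard_run_ids = form.standardruns.data   # list of ints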
| from flask_wtf import FlaskForm
from wtforms import SelectField, SelectMultipleField, SubmitField, widgets
from wtforms.validators import DataRequired, InputRequired
class MultiCheckboxField(SelectMultipleField):
"""
A multiple-select, except displays a list of checkboxes.
Iterating the field will produce subfields, allowing custom rendering of
the enclosed checkbox fields.
"""
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class RetentionTimesForm(FlaskForm):
compoundlist = SelectField('Compound List', coerce=int,
validators=[InputRequired()])
standardruns = MultiCheckboxField(
'Standard Runs', coerce=int, validators=[DataRequired()])
submit = SubmitField('Get List')
| en | 0.691985 | A multiple-select, except displays a list of checkboxes. Iterating the field will produce subfields, allowing custom rendering of the enclosed checkbox fields. | 2.769108 | 3 |
server/tests/test_timeserials_job.py | data2068/dataplay3 | 153 | 6633074 |
from dataplay.mlsvc.job import MLJobStatus
from dataplay.mlsvc.time_serials import TimeSerialsForecastsJob
from dataplay.datasvc.manager import DatasetManager
def test_job_time_serials():
dataset_id = 'air_passengers'
dataset = DatasetManager.get_dataset(dataset_id)
assert dataset is not None
df = dataset.get_df()
assert df is not None
features = ['Date']
targets = ['Number']
job_option = {}
job = TimeSerialsForecastsJob('testtimeserials', dataset_id, features, targets, job_option)
job.train()
if hasattr(job, 'training_error'):
print(f'training error was detected {job.training_error}')
assert job.get_status() == MLJobStatus.SUCCESS
predict_result = job.predict(df[features])
assert predict_result is not None
predict_result.to_csv('/tmp/tt.csv', encoding='utf-8')
# job.clean()
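# To run just this test in isolation (sketch; assumes the 'air_passengers' dataset is
# registered with DatasetManager):
#   pytest server/tests/test_timeserials_job.py::test_job_time_serials -s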
| from dataplay.mlsvc.job import MLJobStatus
from dataplay.mlsvc.time_serials import TimeSerialsForecastsJob
from dataplay.datasvc.manager import DatasetManager
def test_job_time_serials():
dataset_id = 'air_passengers'
dataset = DatasetManager.get_dataset(dataset_id)
assert dataset is not None
df = dataset.get_df()
assert df is not None
features = ['Date']
targets = ['Number']
job_option = {}
job = TimeSerialsForecastsJob('testtimeserials', dataset_id, features, targets, job_option)
job.train()
if hasattr(job, 'training_error'):
print(f'training error was detected {job.training_error}')
assert job.get_status() == MLJobStatus.SUCCESS
predict_result = job.predict(df[features])
assert predict_result is not None
predict_result.to_csv('/tmp/tt.csv', encoding='utf-8')
# job.clean() | en | 0.819534 | # job.clean() | 2.416675 | 2 |
pihole/tests/conftest.py | divyamamgai/integrations-extras | 158 | 6633075 |
import os
import time
import pytest
from datadog_checks.dev import docker_run, get_here
HOST = 'localhost:8888/pass'
URL = 'http://localhost:8888/pass/admin/api.php'
INSTANCE = {'host': HOST}
@pytest.fixture(scope='session')
def dd_environment_pass():
compose_file = os.path.join(get_here(), 'docker-compose.yaml')
with docker_run(compose_file, endpoints=[URL]):
time.sleep(10)
yield instance_pass
@pytest.fixture
def instance_pass():
return INSTANCE.copy()
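# Usage sketch; `Pihole` is a placeholder for the integration's check class and is not
# imported here:
#
#   def test_check_runs(dd_environment_pass, instance_pass):
#       check = Pihole('pihole', {}, [instance_pass])
#       check.check(instance_pass)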
| import os
import time
import pytest
from datadog_checks.dev import docker_run, get_here
HOST = 'localhost:8888/pass'
URL = 'http://localhost:8888/pass/admin/api.php'
INSTANCE = {'host': HOST}
@pytest.fixture(scope='session')
def dd_environment_pass():
compose_file = os.path.join(get_here(), 'docker-compose.yaml')
with docker_run(compose_file, endpoints=[URL]):
time.sleep(10)
yield instance_pass
@pytest.fixture
def instance_pass():
return INSTANCE.copy() | none | 1 | 1.797223 | 2 |
|
upbitpy/TradeShow.py | wonseok0403/upbitpy | 0 | 6633076 |
from upbitpy import Upbitpy
upbitpy = Upbitpy()
allMarket = upbitpy.get_market_all()
KRWList = []
BTCList = []
def GetBTCList(MarketList):
List = []
for i in MarketList :
if( i['market'][0:3] == 'BTC' ) :
List.append(i['market'])
global BTCList
BTCList = List
def GetKRWList(MarketList):
List = []
for i in MarketList :
if( i['market'][0:3] == 'KRW' ) :
List.append(i['market'])
global KRWList
KRWList = List
def CanTradeKRWByBTC(KRWList, BTCList) :
List = []
for i in KRWList :
for j in BTCList :
if( i.split('-')[1] == j.split('-')[1] ) :
print(i)
GetKRWList(allMarket)
GetBTCList(allMarket)
print(BTCList)
CanTradeKRWByBTC(KRWList, BTCList) |
from upbitpy import Upbitpy
upbitpy = Upbitpy()
allMarket = upbitpy.get_market_all()
KRWList = []
BTCList = []
def GetBTCList(MarketList):
List = []
for i in MarketList :
if( i['market'][0:3] == 'BTC' ) :
List.append(i['market'])
global BTCList
BTCList = List
def GetKRWList(MarketList):
List = []
for i in MarketList :
if( i['market'][0:3] == 'KRW' ) :
List.append(i['market'])
global KRWList
KRWList = List
def CanTradeKRWByBTC(KRWList, BTCList) :
List = []
for i in KRWList :
for j in BTCList :
if( i.split('-')[1] == j.split('-')[1] ) :
print(i)
GetKRWList(allMarket)
GetBTCList(allMarket)
print(BTCList)
CanTradeKRWByBTC(KRWList, BTCList) | none | 1 | 2.751129 | 3 |
|
frappe-bench/apps/erpnext/erpnext/regional/india/__init__.py | Semicheche/foa_frappe_docker | 0 | 6633077 |
states = [
'',
'Andaman and Nicobar Islands',
'Andhra Pradesh',
'Arunachal Pradesh',
'Assam',
'Bihar',
'Chandigarh',
'Chhattisgarh',
'Dadra and Nagar Haveli',
'Daman and Diu',
'Delhi',
'Goa',
'Gujarat',
'Haryana',
'Himachal Pradesh',
'Jammu and Kashmir',
'Jharkhand',
'Karnataka',
'Kerala',
'Lakshadweep Islands',
'Madhya Pradesh',
'Maharashtra',
'Manipur',
'Meghalaya',
'Mizoram',
'Nagaland',
'Odisha',
'Other Territory',
'Pondicherry',
'Punjab',
'Rajasthan',
'Sikkim',
'Tamil Nadu',
'Telangana',
'Tripura',
'Uttar Pradesh',
'Uttarakhand',
'West Bengal',
]
state_numbers = {
"Andaman and Nicobar Islands": "35",
"Andhra Pradesh": "37",
"Arunachal Pradesh": "12",
"Assam": "18",
"Bihar": "10",
"Chandigarh": "04",
"Chhattisgarh": "22",
"Dadra and Nagar Haveli": "26",
"Daman and Diu": "25",
"Delhi": "07",
"Goa": "30",
"Gujarat": "24",
"Haryana": "06",
"Himachal Pradesh": "02",
"Jammu and Kashmir": "01",
"Jharkhand": "20",
"Karnataka": "29",
"Kerala": "32",
"Lakshadweep Islands": "31",
"Madhya Pradesh": "23",
"Maharashtra": "27",
"Manipur": "14",
"Meghalaya": "17",
"Mizoram": "15",
"Nagaland": "13",
"Odisha": "21",
"Other Territory": "98",
"Pondicherry": "34",
"Punjab": "03",
"Rajasthan": "08",
"Sikkim": "11",
"Tamil Nadu": "33",
"Telangana": "36",
"Tripura": "16",
"Uttar Pradesh": "09",
"Uttarakhand": "05",
"West Bengal": "19",
} |
states = [
'',
'Andaman and Nicobar Islands',
'Andhra Pradesh',
'Arunachal Pradesh',
'Assam',
'Bihar',
'Chandigarh',
'Chhattisgarh',
'Dadra and Nagar Haveli',
'Daman and Diu',
'Delhi',
'Goa',
'Gujarat',
'Haryana',
'Himachal Pradesh',
'Jammu and Kashmir',
'Jharkhand',
'Karnataka',
'Kerala',
'Lakshadweep Islands',
'Madhya Pradesh',
'Maharashtra',
'Manipur',
'Meghalaya',
'Mizoram',
'Nagaland',
'Odisha',
'Other Territory',
'Pondicherry',
'Punjab',
'Rajasthan',
'Sikkim',
'Tamil Nadu',
'Telangana',
'Tripura',
'Uttar Pradesh',
'Uttarakhand',
'West Bengal',
]
state_numbers = {
"Andaman and Nicobar Islands": "35",
"Andhra Pradesh": "37",
"Arunachal Pradesh": "12",
"Assam": "18",
"Bihar": "10",
"Chandigarh": "04",
"Chhattisgarh": "22",
"Dadra and Nagar Haveli": "26",
"Daman and Diu": "25",
"Delhi": "07",
"Goa": "30",
"Gujarat": "24",
"Haryana": "06",
"Himachal Pradesh": "02",
"Jammu and Kashmir": "01",
"Jharkhand": "20",
"Karnataka": "29",
"Kerala": "32",
"Lakshadweep Islands": "31",
"Madhya Pradesh": "23",
"Maharashtra": "27",
"Manipur": "14",
"Meghalaya": "17",
"Mizoram": "15",
"Nagaland": "13",
"Odisha": "21",
"Other Territory": "98",
"Pondicherry": "34",
"Punjab": "03",
"Rajasthan": "08",
"Sikkim": "11",
"Tamil Nadu": "33",
"Telangana": "36",
"Tripura": "16",
"Uttar Pradesh": "09",
"Uttarakhand": "05",
"West Bengal": "19",
} | none | 1 | 1.36847 | 1 |
|
py/garage/garage/http/handlers.py | clchiou/garage | 3 | 6633078 |
"""HTTP request handlers."""
__all__ = [
'ApiEndpointHandler',
'UriPath',
'add_date_to_headers',
'parse_request',
]
from pathlib import PurePosixPath as UriPath
import datetime
import urllib.parse
import http2
from garage.assertions import ASSERT
from . import servers
class ApiEndpointHandler:
"""Request handler of an API endpoint."""
def __init__(self, endpoint, *,
decode=lambda headers, data: data,
encode=lambda headers, data: data,
make_response_headers=lambda request_headers: ()):
self.endpoint = endpoint
self.decode = decode
self.encode = encode
self.make_response_headers = make_response_headers
async def __call__(self, stream):
request = stream.request
input = self.decode(request.headers, request.body)
output = self.encode(request.headers, await self.endpoint(input))
headers = [(b'content-length', b'%d' % len(output))]
headers.extend(self.make_response_headers(request.headers))
await stream.submit_response(
http2.Response(headers=headers, body=output))
def parse_request(request) -> urllib.parse.SplitResult:
if not request.scheme:
raise servers.ClientError(http2.Status.BAD_REQUEST)
if not request.path:
raise servers.ClientError(http2.Status.BAD_REQUEST)
authority = request.authority
if not authority:
for header, value in request.headers:
if header != b'Host':
continue
if authority:
msg = 'duplicate "Host" header: %r, %r' % (authority, value)
raise servers.ClientError(
http2.Status.BAD_REQUEST, internal_message=msg)
authority = value
if not authority:
raise servers.ClientError(http2.Status.BAD_REQUEST)
try:
uri = b'%s://%s%s' % (request.scheme.value, authority, request.path)
result = urllib.parse.urlsplit(uri.decode('ascii'))
return result._replace(path=UriPath(result.path))
except Exception as exc:
raise servers.ClientError(http2.Status.BAD_REQUEST) from exc
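# Example (sketch): for a request with scheme b'https', authority b'example.com' and
# path b'/a/b?x=1', the SplitResult returned above has scheme='https',
# netloc='example.com', path=UriPath('/a/b') and query='x=1'.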
def add_date_to_headers(headers):
"""Add 'Date' field to headers without checking its presence.
This modifies headers *in place*.
"""
headers.append((b'Date', _rfc_7231_date()))
RFC_7231_FORMAT = \
'{day_name}, {day:02d} {month} {year:04d} {hour:02d}:{minute:02d}:{second:02d} GMT'
RFC_7231_MONTHS = (
'Jan', 'Feb', 'Mar',
'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec',
)
RFC_7231_DAY_NAMES = (
'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun',
)
def _rfc_7231_date(now=None):
if not now:
now = datetime.datetime.utcnow()
# We can't handle non-UTC time zone at the moment.
ASSERT.none(now.tzinfo)
formatted = RFC_7231_FORMAT.format(
year=now.year,
month=RFC_7231_MONTHS[now.month - 1],
day_name=RFC_7231_DAY_NAMES[now.weekday()],
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
)
return formatted.encode('ascii')
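# Example: _rfc_7231_date(datetime.datetime(2019, 5, 4, 2, 40, 1)) returns
# b'Sat, 04 May 2019 02:40:01 GMT', the IMF-fixdate form used in the 'Date' header.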
| """HTTP request handlers."""
__all__ = [
'ApiEndpointHandler',
'UriPath',
'add_date_to_headers',
'parse_request',
]
from pathlib import PurePosixPath as UriPath
import datetime
import urllib.parse
import http2
from garage.assertions import ASSERT
from . import servers
class ApiEndpointHandler:
"""Request handler of an API endpoint."""
def __init__(self, endpoint, *,
decode=lambda headers, data: data,
encode=lambda headers, data: data,
make_response_headers=lambda request_headers: ()):
self.endpoint = endpoint
self.decode = decode
self.encode = encode
self.make_response_headers = make_response_headers
async def __call__(self, stream):
request = stream.request
input = self.decode(request.headers, request.body)
output = self.encode(request.headers, await self.endpoint(input))
headers = [(b'content-length', b'%d' % len(output))]
headers.extend(self.make_response_headers(request.headers))
await stream.submit_response(
http2.Response(headers=headers, body=output))
def parse_request(request) -> urllib.parse.SplitResult:
if not request.scheme:
raise servers.ClientError(http2.Status.BAD_REQUEST)
if not request.path:
raise servers.ClientError(http2.Status.BAD_REQUEST)
authority = request.authority
if not authority:
for header, value in request.headers:
if header != b'Host':
continue
if authority:
msg = 'duplicate "Host" header: %r, %r' % (authority, value)
raise servers.ClientError(
http2.Status.BAD_REQUEST, internal_message=msg)
authority = value
if not authority:
raise servers.ClientError(http2.Status.BAD_REQUEST)
try:
uri = b'%s://%s%s' % (request.scheme.value, authority, request.path)
result = urllib.parse.urlsplit(uri.decode('ascii'))
return result._replace(path=UriPath(result.path))
except Exception as exc:
raise servers.ClientError(http2.Status.BAD_REQUEST) from exc
def add_date_to_headers(headers):
"""Add 'Date' field to headers without checking its presence.
This modifies headers *in place*.
"""
headers.append((b'Date', _rfc_7231_date()))
RFC_7231_FORMAT = \
'{day_name}, {day:02d} {month} {year:04d} {hour:02d}:{minute:02d}:{second:02d} GMT'
RFC_7231_MONTHS = (
'Jan', 'Feb', 'Mar',
'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec',
)
RFC_7231_DAY_NAMES = (
'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun',
)
def _rfc_7231_date(now=None):
if not now:
now = datetime.datetime.utcnow()
# We can't handle non-UTC time zone at the moment.
ASSERT.none(now.tzinfo)
formatted = RFC_7231_FORMAT.format(
year=now.year,
month=RFC_7231_MONTHS[now.month - 1],
day_name=RFC_7231_DAY_NAMES[now.weekday()],
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
)
return formatted.encode('ascii') | en | 0.806232 | HTTP request handlers. Request handler of an API endpoint. Add 'Date' field to headers without checking its presence. This modifies headers *in place*. # We can't handle non-UTC time zone at the moment. | 2.914008 | 3 |
old-projects/calculator.py | waitblock/gists | 3 | 6633079 | z=0
x=int(raw_input("\nType in your first number:"))
y=int(raw_input("\nType in your second number:"))
i=int(raw_input("\nWhat operation do you want to do? (enter 1=+, 2=-, 3=*, 4=/):"))
while z == 0:
if i==1:
a=x+y
print (a)
z=1
if i==2:
a=x-y
print (a)
z=1
if i == 3:
a=x*y
print (a)
z=1
if i == 4:
a=x/y
print (a)
z=1
else:
z=1
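# Note: this is a Python 2 script -- raw_input() and integer (floor) division in the
# `/` branch; on Python 3 use input() and choose between x / y and x // y explicitly.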
| z=0
x=int(raw_input("\nType in your first number:"))
y=int(raw_input("\nType in your second number:"))
i=int(raw_input("\nWhat operation do you want to do? (enter 1=+, 2=-, 3=*, 4=/):"))
while z == 0:
if i==1:
a=x+y
print (a)
z=1
if i==2:
a=x-y
print (a)
z=1
if i == 3:
a=x*y
print (a)
z=1
if i == 4:
a=x/y
print (a)
z=1
else:
z=1
| none | 1 | 3.7961 | 4 |
|
modules/ts_utils.py | alceballosa/dengue-project | 0 | 6633080 |
from statsmodels.tsa.seasonal import seasonal_decompose
def within_maximum_range(val, max_val, thres = 0.020):
val = abs(val)
max_val = abs(max_val)
diff = abs(val-max_val)
if diff < thres:
return True
else:
return False
def normalize_timeseries(df, mode = "MES", cols_to_normalize = []):
df_norm = df[cols_to_normalize].copy()
if mode == "NO":
return df_norm, None, None
elif mode == "MES":
df_norm["MES"] = df_norm.index.month
promedios_mensuales = df_norm.groupby("MES").mean()
desv_esta_mensuales = df_norm.groupby("MES").std()
prom = promedios_mensuales.values[:,0:]
std = desv_esta_mensuales.values[:,0:]
for i in range(len(df)):
mes = int(df_norm.iloc[i,:].MES - 1)
for j, col in enumerate(df_norm.columns[:-2]):
df_norm.iloc[i,j] = (df_norm.iloc[i,j]-prom[mes,j])/std[mes,j]
return df_norm, promedios_mensuales, desv_esta_mensuales
elif mode == "SEASONAL_DECOMPOSE":
df_norm = df[cols_to_normalize].copy()
for col in cols_to_normalize[:-2]:
decomposed = seasonal_decompose(df[col].dropna(),model='additive', period = 52)
df_norm[col] = decomposed.resid+decomposed.trend
df_norm = df_norm.dropna()
return df_norm, None, None
def lagged_corr(df, weeks, var, window):
return df[str(var)].shift(periods=weeks).iloc[weeks:].corr(df['DENGUE_PER_100K'].iloc[weeks:], min_periods=window, method = "pearson")
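# Usage sketch with synthetic data ('TEMPERATURE' is a placeholder column name; only
# 'DENGUE_PER_100K' is required by lagged_corr itself):
#
#   import numpy as np
#   import pandas as pd
#   idx = pd.date_range('2015-01-04', periods=104, freq='W')
#   df = pd.DataFrame({'TEMPERATURE': np.random.rand(104),
#                      'DENGUE_PER_100K': np.random.rand(104)}, index=idx)
#   r = lagged_corr(df, weeks=4, var='TEMPERATURE', window=52)
#   # Pearson correlation between temperature led by 4 weeks and dengue incidence,
#   # computed only if at least 52 paired observations are available.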
|
from statsmodels.tsa.seasonal import seasonal_decompose
def within_maximum_range(val, max_val, thres = 0.020):
val = abs(val)
max_val = abs(max_val)
diff = abs(val-max_val)
if diff < thres:
return True
else:
return False
def normalize_timeseries(df, mode = "MES", cols_to_normalize = []):
df_norm = df[cols_to_normalize].copy()
if mode == "NO":
return df_norm, None, None
elif mode == "MES":
df_norm["MES"] = df_norm.index.month
promedios_mensuales = df_norm.groupby("MES").mean()
desv_esta_mensuales = df_norm.groupby("MES").std()
prom = promedios_mensuales.values[:,0:]
std = desv_esta_mensuales.values[:,0:]
for i in range(len(df)):
mes = int(df_norm.iloc[i,:].MES - 1)
for j, col in enumerate(df_norm.columns[:-2]):
df_norm.iloc[i,j] = (df_norm.iloc[i,j]-prom[mes,j])/std[mes,j]
return df_norm, promedios_mensuales, desv_esta_mensuales
elif mode == "SEASONAL_DECOMPOSE":
df_norm = df[cols_to_normalize].copy()
for col in cols_to_normalize[:-2]:
decomposed = seasonal_decompose(df[col].dropna(),model='additive', period = 52)
df_norm[col] = decomposed.resid+decomposed.trend
df_norm = df_norm.dropna()
return df_norm, None, None
def lagged_corr(df, weeks, var, window):
return df[str(var)].shift(periods=weeks).iloc[weeks:].corr(df['DENGUE_PER_100K'].iloc[weeks:], min_periods=window, method = "pearson")
| none | 1 | 2.443657 | 2 |
|
Bindings/Python/tests/test_states_trajectory.py | justicelee/opensim-core | 2 | 6633081 | <reponame>justicelee/opensim-core<filename>Bindings/Python/tests/test_states_trajectory.py
import os
import unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
# Silence warning messages if mesh (.vtp) files cannot be found.
osim.Model.setDebugLevel(0)
# TODO add more tests of the integrity checks.
class TestStatesTrajectory(unittest.TestCase):
states_sto_fname = "test_states_trajectory_gait1018_states.sto"
def test_index_and_iterator(self):
if os.path.exists(self.states_sto_fname):
os.remove(self.states_sto_fname)
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
forward = osim.ForwardTool()
forward.setModel(model)
forward.setName('test_states_trajectory_gait1018')
forward.setFinalTime(0.1)
forward.run()
states = osim.StatesTrajectory.createFromStatesStorage(
model, self.states_sto_fname)
# Test indexing into the states container.
model.getTotalMass(states[0])
count = 0
for i in range(states.getSize()):
model.calcMassCenterVelocity(states.get(i))
count += 1
# Test iterator.
count_iter = 0
for state in states:
model.calcMassCenterPosition(state)
count_iter += 1
assert count == count_iter
def test_modify_states(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
states = osim.StatesTrajectory.createFromStatesStorage(
model, self.states_sto_fname)
states[0].setTime(4)
assert states[0].getTime() == 4
self.assertNotAlmostEqual(model.getStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value"), 8)
model.setStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value", 8)
self.assertAlmostEqual(model.getStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value"), 8)
# Assigning is not allowed, since it easily allows people to violate
# the ordering of the trajectory.
# Also, the assignment `states.upd(5) = states[2]` is not possible in
# Python ('can't assign to function call').
def test_setitem():
states[5] = states[2]
self.assertRaises(TypeError, test_setitem)
def test_states_storage_optional_arguments(self):
# Try all combinations of optional arguments, just to ensure the
# wrapping works.
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
sto = osim.Storage(self.states_sto_fname)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, False, False)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, False, True)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, True, False)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, True, True)
def test_populate_trajectory(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
self.assertEqual(states.getSize(), 2)
self.assertEqual(states[1].getTime(), 1.0)
def test_out_of_range(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
# TODO this exception message could be better...
self.assertRaises(RuntimeError, lambda: states[2].getTime())
def test_integrity_checks(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
self.assertTrue(states.isNondecreasingInTime())
self.assertTrue(states.isConsistent())
self.assertTrue(states.hasIntegrity())
# Cannot append a state with an earlier time than the last one.
state.setTime(0.5)
self.assertRaises(RuntimeError, states.append, state)
# However, since python doesn't have constness, we can edit the time of
# a state in the trajectory.
state.setTime(1.5)
states.append(state)
self.assertTrue(states.isNondecreasingInTime())
states.back().setTime(0.25)
self.assertFalse(states.isNondecreasingInTime())
self.assertTrue(states.isConsistent())
self.assertFalse(states.hasIntegrity())
# TODO check violating isConsistent() (might need a different model).
def test_reporter(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
rep = osim.StatesTrajectoryReporter()
rep.setName('reporter')
rep.set_report_time_interval(0.01)
model.addComponent(rep)
model.initSystem()
forward = osim.ForwardTool()
forward.setModel(model)
forward.setName('test_states_trajectory_reporter_gait1018')
forward.setFinalTime(0.05)
forward.run()
states = rep.getStates()
assert states.getSize() == 6
for i in range(6):
assert states[i].getTime() == i * 0.01
| import os
import unittest
import opensim as osim
test_dir = os.path.join(os.path.dirname(os.path.abspath(osim.__file__)),
'tests')
# Silence warning messages if mesh (.vtp) files cannot be found.
osim.Model.setDebugLevel(0)
# TODO add more tests of the integrity checks.
class TestStatesTrajectory(unittest.TestCase):
states_sto_fname = "test_states_trajectory_gait1018_states.sto"
def test_index_and_iterator(self):
if os.path.exists(self.states_sto_fname):
os.remove(self.states_sto_fname)
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
forward = osim.ForwardTool()
forward.setModel(model)
forward.setName('test_states_trajectory_gait1018')
forward.setFinalTime(0.1)
forward.run()
states = osim.StatesTrajectory.createFromStatesStorage(
model, self.states_sto_fname)
# Test indexing into the states container.
model.getTotalMass(states[0])
count = 0
for i in range(states.getSize()):
model.calcMassCenterVelocity(states.get(i))
count += 1
# Test iterator.
count_iter = 0
for state in states:
model.calcMassCenterPosition(state)
count_iter += 1
assert count == count_iter
def test_modify_states(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
states = osim.StatesTrajectory.createFromStatesStorage(
model, self.states_sto_fname)
states[0].setTime(4)
assert states[0].getTime() == 4
self.assertNotAlmostEqual(model.getStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value"), 8)
model.setStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value", 8)
self.assertAlmostEqual(model.getStateVariableValue(states[2],
"ground_pelvis/pelvis_tilt/value"), 8)
# Assigning is not allowed, since it easily allows people to violate
# the ordering of the trajectory.
# Also, the assignment `states.upd(5) = states[2]` is not possible in
# Python ('can't assign to function call').
def test_setitem():
states[5] = states[2]
self.assertRaises(TypeError, test_setitem)
def test_states_storage_optional_arguments(self):
# Try all combinations of optional arguments, just to ensure the
# wrapping works.
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
model.initSystem()
sto = osim.Storage(self.states_sto_fname)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, False, False)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, False, True)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, True, False)
states = osim.StatesTrajectory.createFromStatesStorage(
model, sto, True, True)
def test_populate_trajectory(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
self.assertEqual(states.getSize(), 2)
self.assertEqual(states[1].getTime(), 1.0)
def test_out_of_range(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
# TODO this exception message could be better...
self.assertRaises(RuntimeError, lambda: states[2].getTime())
def test_integrity_checks(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
state = model.initSystem()
states = osim.StatesTrajectory()
states.append(state)
state.setTime(1.0)
states.append(state)
self.assertTrue(states.isNondecreasingInTime())
self.assertTrue(states.isConsistent())
self.assertTrue(states.hasIntegrity())
# Cannot append a state with an earlier time than the last one.
state.setTime(0.5)
self.assertRaises(RuntimeError, states.append, state)
# However, since python doesn't have constness, we can edit the time of
# a state in the trajectory.
state.setTime(1.5)
states.append(state)
self.assertTrue(states.isNondecreasingInTime())
states.back().setTime(0.25)
self.assertFalse(states.isNondecreasingInTime())
self.assertTrue(states.isConsistent())
self.assertFalse(states.hasIntegrity())
# TODO check violating isConsistent() (might need a different model).
def test_reporter(self):
model = osim.Model(os.path.join(test_dir,
"gait10dof18musc_subject01.osim"))
rep = osim.StatesTrajectoryReporter()
rep.setName('reporter')
rep.set_report_time_interval(0.01)
model.addComponent(rep)
model.initSystem()
forward = osim.ForwardTool()
forward.setModel(model)
forward.setName('test_states_trajectory_reporter_gait1018')
forward.setFinalTime(0.05)
forward.run()
states = rep.getStates()
assert states.getSize() == 6
for i in range(6):
assert states[i].getTime() == i * 0.01 | en | 0.81023 | # Silence warning messages if mesh (.vtp) files cannot be found. # TODO add more tests of the integrity checks. # Test indexing into the states container. # Test iterator. # Assigning is not allowed, since it easily allows people to violate # the ordering of the trajectory. # Also, the assignment `states.upd(5) = states[2]` is not possible in # Python ('can't assign to function call'). # Try all combinations of optional arguments, just to ensure the # wrapping works. # TODO this exception message could be better... # Cannot append a state with an earlier time than the last one. # However, since python doesn't have constness, we can edit the time of # a state in the trajectory. # TODO check violating isConsistent() (might need a different model). | 2.26788 | 2 |
extensions/language.py | yiays/merely | 0 | 6633082 | import discord
from discord.ext import commands
import re
class Language(commands.cog.Cog):
def __init__(self, bot:commands.Bot):
self.bot = bot
# ensure config file has required data
if not bot.config.has_section('language'):
bot.config.add_section('language')
@commands.group()
async def language(self, ctx:commands.Context):
"""user-facing setter/getter for guild/user language options"""
if ctx.invoked_subcommand is None:
await self.language_list(ctx)
@language.command(name='list')
async def language_list(self, ctx:commands.Context):
embed = discord.Embed(title = self.bot.babel(ctx, 'language', 'list_title'),
description = self.bot.babel(ctx, 'language', 'set_howto')+\
'\n'+self.bot.babel(ctx, 'language', 'contribute_cta') if self.bot.config['language']['contribute_url'] else '',
color = int(self.bot.config['main']['themecolor'], 16))
for langcode,language in self.bot.babel.langs.items():
embed.add_field(name=f"{language.get('meta', 'name')} ({langcode})",
value=f"{language.get('meta', 'contributors', fallback=self.bot.babel(ctx, 'language', 'unknown_contributors'))}\n"+\
self.bot.babel(ctx, 'language', 'coverage_label', coverage = self.bot.babel.calculate_coverage(langcode)))
await ctx.reply(embed=embed)
@language.command(name='get')
async def language_get(self, ctx:commands.Context):
langs, origins = self.bot.babel.resolve_lang(ctx, debug=True)
language = self.bot.babel.langs[langs[0]]
embed = discord.Embed(title = f"{language.get('meta', 'name')} ({langs[0]})",
description = self.bot.babel(ctx, 'language', 'origin_reason_'+origins[0]),
color = int(self.bot.config['main']['themecolor'], 16))
await ctx.reply(embed=embed)
@language.command(name='set')
async def language_set(self, ctx:commands.Context, langcode:str):
if re.match(r'[a-z]{2}(-[A-Z]{2})?$', langcode) is None:
await ctx.reply(self.bot.babel(ctx, 'language', 'set_failed_invalid_pattern'))
else:
langcode = self.bot.config.get('language', 'prefix', fallback='')+langcode
if isinstance(ctx.channel, discord.abc.PrivateChannel) or not ctx.author.guild_permissions.administrator:
usermode = True
self.bot.config.set('language', str(ctx.author.id), langcode)
else:
usermode = False
self.bot.config.set('language', str(ctx.guild.id), langcode)
self.bot.config.save()
if langcode in self.bot.babel.langs.keys():
await ctx.reply(self.bot.babel(ctx,
'language',
'set_success',
language=self.bot.babel.langs[langcode].get('meta', 'name'),
usermode=usermode))
else:
await ctx.reply(self.bot.babel(ctx, 'language', 'set_warning_no_match')+'\n'+self.bot.babel(ctx, 'language', 'contribute_cta'))
def setup(bot):
bot.add_cog(Language(bot)) | import discord
from discord.ext import commands
import re
class Language(commands.cog.Cog):
def __init__(self, bot:commands.Bot):
self.bot = bot
# ensure config file has required data
if not bot.config.has_section('language'):
bot.config.add_section('language')
@commands.group()
async def language(self, ctx:commands.Context):
"""user-facing setter/getter for guild/user language options"""
if ctx.invoked_subcommand is None:
await self.language_list(ctx)
@language.command(name='list')
async def language_list(self, ctx:commands.Context):
embed = discord.Embed(title = self.bot.babel(ctx, 'language', 'list_title'),
description = self.bot.babel(ctx, 'language', 'set_howto')+\
'\n'+self.bot.babel(ctx, 'language', 'contribute_cta') if self.bot.config['language']['contribute_url'] else '',
color = int(self.bot.config['main']['themecolor'], 16))
for langcode,language in self.bot.babel.langs.items():
embed.add_field(name=f"{language.get('meta', 'name')} ({langcode})",
value=f"{language.get('meta', 'contributors', fallback=self.bot.babel(ctx, 'language', 'unknown_contributors'))}\n"+\
self.bot.babel(ctx, 'language', 'coverage_label', coverage = self.bot.babel.calculate_coverage(langcode)))
await ctx.reply(embed=embed)
@language.command(name='get')
async def language_get(self, ctx:commands.Context):
langs, origins = self.bot.babel.resolve_lang(ctx, debug=True)
language = self.bot.babel.langs[langs[0]]
embed = discord.Embed(title = f"{language.get('meta', 'name')} ({langs[0]})",
description = self.bot.babel(ctx, 'language', 'origin_reason_'+origins[0]),
color = int(self.bot.config['main']['themecolor'], 16))
await ctx.reply(embed=embed)
@language.command(name='set')
async def language_set(self, ctx:commands.Context, langcode:str):
if re.match(r'[a-z]{2}(-[A-Z]{2})?$', langcode) is None:
await ctx.reply(self.bot.babel(ctx, 'language', 'set_failed_invalid_pattern'))
else:
langcode = self.bot.config.get('language', 'prefix', fallback='')+langcode
if isinstance(ctx.channel, discord.abc.PrivateChannel) or not ctx.author.guild_permissions.administrator:
usermode = True
self.bot.config.set('language', str(ctx.author.id), langcode)
else:
usermode = False
self.bot.config.set('language', str(ctx.guild.id), langcode)
self.bot.config.save()
if langcode in self.bot.babel.langs.keys():
await ctx.reply(self.bot.babel(ctx,
'language',
'set_success',
language=self.bot.babel.langs[langcode].get('meta', 'name'),
usermode=usermode))
else:
await ctx.reply(self.bot.babel(ctx, 'language', 'set_warning_no_match')+'\n'+self.bot.babel(ctx, 'language', 'contribute_cta'))
def setup(bot):
bot.add_cog(Language(bot)) | en | 0.748364 | # ensure config file has required data user-facing setter/getter for guild/user language options | 2.437822 | 2 |
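
A hypothetical loading sketch for the extension above, assuming discord.py 1.x (where load_extension is synchronous and ends up calling setup(bot)); the command prefix and module path are assumptions, and the cog expects bot.babel and bot.config to be attached elsewhere in the project before any command runs.

from discord.ext import commands

bot = commands.Bot(command_prefix="m.")
# babel/config are wired up by the rest of the project (assumption)
bot.load_extension("extensions.language")
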
ixtlilton_tools/_private_tools/exceptions.py | uibcdf/Ixtlilton | 0 | 6633083 | class BadCallError(ValueError):
def __init__(self, message=None):
if message is None:
message = 'Wrong way of invoking this method. Check the online documentation for more information: http://www.uibcdf.org/MolSysMT'
super().__init__(message)
class NotImplementedError(NotImplementedError):
def __init__(self, message=None):
if message is None:
message = 'It has not been implemeted yet. Write a new issue in https://github.com/uibcdf/MolSysMT/issues asking for it.'
super().__init__(message)
class LibraryNotFound(ValueError):
def __init__(self, library):
message = 'The python library {} was not found.'.format(library)
super().__init__(message)
class NoAdminRights(ValueError):
def __init__(self, message=None):
if message is None:
message = 'This method needs administration rights.'
super().__init__(message)
class DirectoryConflict(ValueError):
def __init__(self, message=None):
if message is None:
message = 'There is a directory conflict'
super().__init__(message)
class NoUIDsAvailable(ValueError):
def __init__(self, message=None):
if message is None:
message = 'All user ids between 2000 and 2999 are already taken.'
super().__init__(message)
class UserDoesNotExist(ValueError):
def __init__(self, username=None, message=None):
if message is None:
if username is None:
                message = 'The user does not exist.'
else:
                message = f'The user {username} does not exist.'
super().__init__(message)
class GroupDoesNotExist(ValueError):
def __init__(self, groupname=None, message=None):
if message is None:
if groupname is None:
                message = 'The group does not exist.'
else:
                message = f'The group {groupname} does not exist.'
super().__init__(message)
| class BadCallError(ValueError):
def __init__(self, message=None):
if message is None:
message = 'Wrong way of invoking this method. Check the online documentation for more information: http://www.uibcdf.org/MolSysMT'
super().__init__(message)
class NotImplementedError(NotImplementedError):
def __init__(self, message=None):
if message is None:
message = 'It has not been implemeted yet. Write a new issue in https://github.com/uibcdf/MolSysMT/issues asking for it.'
super().__init__(message)
class LibraryNotFound(ValueError):
def __init__(self, library):
message = 'The python library {} was not found.'.format(library)
super().__init__(message)
class NoAdminRights(ValueError):
def __init__(self, message=None):
if message is None:
message = 'This method needs administration rights.'
super().__init__(message)
class DirectoryConflict(ValueError):
def __init__(self, message=None):
if message is None:
message = 'There is a directory conflict'
super().__init__(message)
class NoUIDsAvailable(ValueError):
def __init__(self, message=None):
if message is None:
message = 'All user ids between 2000 and 2999 are already taken.'
super().__init__(message)
class UserDoesNotExist(ValueError):
def __init__(self, username=None, message=None):
if message is None:
if username is None:
                message = 'The user does not exist.'
else:
                message = f'The user {username} does not exist.'
super().__init__(message)
class GroupDoesNotExist(ValueError):
def __init__(self, groupname=None, message=None):
if message is None:
if groupname is None:
                message = 'The group does not exist.'
else:
                message = f'The group {groupname} does not exist.'
super().__init__(message)
| none | 1 | 2.702277 | 3 |
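
For illustration only — raising and catching one of the exceptions defined above (the username is made up):

try:
    raise UserDoesNotExist(username="alice")
except UserDoesNotExist as err:
    print(err)  # The user alice does not exist.
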
|
tensorflow_checkpoint_reader/core.py | shawwn/tensorflow-checkpoint-reader | 1 | 6633084 | import struct
import re
import functools
@functools.total_ordering
class StringPiece:
npos = -1
def __init__(self, value=None, size=None, offset=None):
self.set(value)
if offset is not None:
self.remove_prefix(offset)
if size is not None:
self.remove_suffix(self.size() - size)
def data(self):
return StringPiece(self._ptr, self._length, self._offset)
def slice(self) -> bytearray:
return self._ptr[self._offset:self._offset + self._length]
def bytes(self) -> bytes:
return bytes(self)
def memoryview(self) -> memoryview:
return memoryview(self.slice())
def set(self, other):
if other is None:
self._ptr = bytearray()
self._length = 0
self._offset = 0
elif isinstance(other, str):
self._ptr = bytearray(other, 'utf-8')
self._length = len(other)
self._offset = 0
elif isinstance(other, bytes):
self._ptr = bytearray(other)
self._length = len(other)
self._offset = 0
elif isinstance(other, bytearray):
self._ptr = other
self._length = len(other)
self._offset = 0
elif isinstance(other, StringPiece):
self._ptr = other._ptr
self._length = other._length
self._offset = other._offset
else:
raise TypeError("Expected stringlike")
def at(self, pos: int):
assert 0 <= pos < self._length
return self._ptr[self._offset + pos]
def __getitem__(self, item):
return self.at(item)
def __setitem__(self, pos, value):
assert 0 <= pos < self._length
self._ptr[self._offset + pos] = value
def length(self) -> int:
"""Returns the number of characters in the `string_view`."""
return self._length
def size(self) -> int:
"""Returns the number of characters in the `string_view`. Alias for `size()`."""
return self.length()
def empty(self) -> bool:
"""Checks if the `string_view` is empty (refers to no characters)."""
return self._length <= 0
def advance(self, n: int):
if n < 0:
if -n > self._offset:
raise ValueError(f"Can't advance by {n}")
elif n > 0:
if n > self._length:
raise ValueError(f"Can't advance by {n}")
self._offset += n
self._length -= n
def remove_prefix(self, n: int):
"""Removes the first `n` characters from the `string_view`. Note that the
underlying string is not changed, only the view."""
if n > self._length or n < 0:
raise ValueError(f"Can't remove {n} bytes")
self._offset += n
self._length -= n
def remove_suffix(self, n: int):
"""Removes the last `n` characters from the `string_view`. Note that the
underlying string is not changed, only the view."""
if n > self._length or n < 0:
raise ValueError(f"Can't remove {n} bytes")
self._length -= n
def find(self, target):
return self.slice().find(string_view(target).slice())
def rfind(self, target):
return self.slice().rfind(string_view(target).slice())
def find_first_of(self, chars):
# chars = list(string_view(chars).slice())
# me = self.slice()
# for i, c in enumerate(me):
# if c in chars:
# return i
# return self.npos
pat = b'[' + re.escape(string_view(chars).slice()) + b']'
me = self.slice()
match = re.search(pat, me)
if match is None:
return self.npos
else:
return match.start()
def substr(self, pos: int, n: int = npos):
s = self.data()
s.advance(pos)
if n != StringPiece.npos:
s.remove_suffix(len(s) - n)
return s
# s = self.slice()
# if pos < 0:
# pos += len(s)
# if n == StringPiece.npos:
# return s[pos:]
# else:
# return s[pos:pos + n]
def begin(self):
return StringPiece(self._ptr, self._length, self._offset)
def end(self):
return StringPiece(self._ptr, 0, self._offset + self._length)
def __bytes__(self):
return bytes(self.slice())
def __str__(self):
return self.slice().decode('utf-8')
def __repr__(self):
return f"StringPiece({self.slice()!r})"
def __len__(self):
return self.length()
def __add__(self, other):
assert isinstance(other, int)
r = StringPiece(self)
r.advance(other)
return r
def __sub__(self, other):
if isinstance(other, StringPiece):
if other._ptr is not self._ptr:
raise ValueError("Can only subtract pointers to same underlying data")
return self._offset - other._offset
r = StringPiece(self)
r.advance(-other)
return r
def __eq__(self, other):
if isinstance(other, StringPiece):
if other._ptr is self._ptr:
if other._offset == self._offset:
return other._length == self._length
return False
def __lt__(self, other):
if isinstance(other, StringPiece):
if other._ptr is self._ptr:
return self._offset < other._offset
return False
def __bool__(self):
return not self.empty()
def __iadd__(self, n):
assert isinstance(n, int)
self.advance(n)
return self
def __isub__(self, n):
assert isinstance(n, int)
self.advance(n)
return self
def string_view(data = None, length: int = None, offset: int = None) -> StringPiece:
return StringPiece(data, length, offset)
def get_varint_64(input: StringPiece):
result = 0
p = input.slice()
i = 0
for shift in range(0, 64, 7):
byte = p[i]
i += 1
if byte & 128:
# More bytes are present
result |= ((byte & 127) << shift)
else:
result |= (byte << shift)
input.remove_prefix(i)
return True, result
return False, None
def get_varint_32_ptr(data: bytes, p: int, limit: int):
if p < limit:
result = data[p]
if (result & 128) == 0:
value = result
return p + 1, value
return get_varint_32_ptr_fallback(data, p, limit)
def get_varint_32_ptr_fallback(data: bytes, p: int, limit: int):
result = 0
for shift in range(0, 29, 7):
if p >= limit:
break
byte = data[p]
p += 1
if byte & 128:
# More bytes are present
result |= (byte & 127) << shift
else:
result |= byte << shift
value = result
return p, value
return None, None
def decode_fixed_64(buffer, offset=0):
return struct.unpack_from('<Q', buffer, offset=offset)[0]
def decode_fixed_32(buffer, offset=0):
return struct.unpack_from('<L', buffer, offset=offset)[0]
| import struct
import re
import functools
@functools.total_ordering
class StringPiece:
npos = -1
def __init__(self, value=None, size=None, offset=None):
self.set(value)
if offset is not None:
self.remove_prefix(offset)
if size is not None:
self.remove_suffix(self.size() - size)
def data(self):
return StringPiece(self._ptr, self._length, self._offset)
def slice(self) -> bytearray:
return self._ptr[self._offset:self._offset + self._length]
def bytes(self) -> bytes:
return bytes(self)
def memoryview(self) -> memoryview:
return memoryview(self.slice())
def set(self, other):
if other is None:
self._ptr = bytearray()
self._length = 0
self._offset = 0
elif isinstance(other, str):
self._ptr = bytearray(other, 'utf-8')
self._length = len(other)
self._offset = 0
elif isinstance(other, bytes):
self._ptr = bytearray(other)
self._length = len(other)
self._offset = 0
elif isinstance(other, bytearray):
self._ptr = other
self._length = len(other)
self._offset = 0
elif isinstance(other, StringPiece):
self._ptr = other._ptr
self._length = other._length
self._offset = other._offset
else:
raise TypeError("Expected stringlike")
def at(self, pos: int):
assert 0 <= pos < self._length
return self._ptr[self._offset + pos]
def __getitem__(self, item):
return self.at(item)
def __setitem__(self, pos, value):
assert 0 <= pos < self._length
self._ptr[self._offset + pos] = value
def length(self) -> int:
"""Returns the number of characters in the `string_view`."""
return self._length
def size(self) -> int:
"""Returns the number of characters in the `string_view`. Alias for `size()`."""
return self.length()
def empty(self) -> bool:
"""Checks if the `string_view` is empty (refers to no characters)."""
return self._length <= 0
def advance(self, n: int):
if n < 0:
if -n > self._offset:
raise ValueError(f"Can't advance by {n}")
elif n > 0:
if n > self._length:
raise ValueError(f"Can't advance by {n}")
self._offset += n
self._length -= n
def remove_prefix(self, n: int):
"""Removes the first `n` characters from the `string_view`. Note that the
underlying string is not changed, only the view."""
if n > self._length or n < 0:
raise ValueError(f"Can't remove {n} bytes")
self._offset += n
self._length -= n
def remove_suffix(self, n: int):
"""Removes the last `n` characters from the `string_view`. Note that the
underlying string is not changed, only the view."""
if n > self._length or n < 0:
raise ValueError(f"Can't remove {n} bytes")
self._length -= n
def find(self, target):
return self.slice().find(string_view(target).slice())
def rfind(self, target):
return self.slice().rfind(string_view(target).slice())
def find_first_of(self, chars):
# chars = list(string_view(chars).slice())
# me = self.slice()
# for i, c in enumerate(me):
# if c in chars:
# return i
# return self.npos
pat = b'[' + re.escape(string_view(chars).slice()) + b']'
me = self.slice()
match = re.search(pat, me)
if match is None:
return self.npos
else:
return match.start()
def substr(self, pos: int, n: int = npos):
s = self.data()
s.advance(pos)
if n != StringPiece.npos:
s.remove_suffix(len(s) - n)
return s
# s = self.slice()
# if pos < 0:
# pos += len(s)
# if n == StringPiece.npos:
# return s[pos:]
# else:
# return s[pos:pos + n]
def begin(self):
return StringPiece(self._ptr, self._length, self._offset)
def end(self):
return StringPiece(self._ptr, 0, self._offset + self._length)
def __bytes__(self):
return bytes(self.slice())
def __str__(self):
return self.slice().decode('utf-8')
def __repr__(self):
return f"StringPiece({self.slice()!r})"
def __len__(self):
return self.length()
def __add__(self, other):
assert isinstance(other, int)
r = StringPiece(self)
r.advance(other)
return r
def __sub__(self, other):
if isinstance(other, StringPiece):
if other._ptr is not self._ptr:
raise ValueError("Can only subtract pointers to same underlying data")
return self._offset - other._offset
r = StringPiece(self)
r.advance(-other)
return r
def __eq__(self, other):
if isinstance(other, StringPiece):
if other._ptr is self._ptr:
if other._offset == self._offset:
return other._length == self._length
return False
def __lt__(self, other):
if isinstance(other, StringPiece):
if other._ptr is self._ptr:
return self._offset < other._offset
return False
def __bool__(self):
return not self.empty()
def __iadd__(self, n):
assert isinstance(n, int)
self.advance(n)
return self
def __isub__(self, n):
assert isinstance(n, int)
self.advance(n)
return self
def string_view(data = None, length: int = None, offset: int = None) -> StringPiece:
return StringPiece(data, length, offset)
def get_varint_64(input: StringPiece):
result = 0
p = input.slice()
i = 0
for shift in range(0, 64, 7):
byte = p[i]
i += 1
if byte & 128:
# More bytes are present
result |= ((byte & 127) << shift)
else:
result |= (byte << shift)
input.remove_prefix(i)
return True, result
return False, None
def get_varint_32_ptr(data: bytes, p: int, limit: int):
if p < limit:
result = data[p]
if (result & 128) == 0:
value = result
return p + 1, value
return get_varint_32_ptr_fallback(data, p, limit)
def get_varint_32_ptr_fallback(data: bytes, p: int, limit: int):
result = 0
for shift in range(0, 29, 7):
if p >= limit:
break
byte = data[p]
p += 1
if byte & 128:
# More bytes are present
result |= (byte & 127) << shift
else:
result |= byte << shift
value = result
return p, value
return None, None
def decode_fixed_64(buffer, offset=0):
return struct.unpack_from('<Q', buffer, offset=offset)[0]
def decode_fixed_32(buffer, offset=0):
return struct.unpack_from('<L', buffer, offset=offset)[0]
| en | 0.593323 | Returns the number of characters in the `string_view`. Returns the number of characters in the `string_view`. Alias for `size()`. Checks if the `string_view` is empty (refers to no characters). Removes the first `n` characters from the `string_view`. Note that the underlying string is not changed, only the view. Removes the last `n` characters from the `string_view`. Note that the underlying string is not changed, only the view. # chars = list(string_view(chars).slice()) # me = self.slice() # for i, c in enumerate(me): # if c in chars: # return i # return self.npos # s = self.slice() # if pos < 0: # pos += len(s) # if n == StringPiece.npos: # return s[pos:] # else: # return s[pos:pos + n] # More bytes are present # More bytes are present | 3.112394 | 3 |
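
A small self-contained check of the decoders above (the two-byte string is the standard varint encoding of 300, i.e. 0xAC 0x02; struct is only used to build a fixed-width test value):

import struct

new_pos, value = get_varint_32_ptr(b"\xac\x02", 0, 2)
assert (new_pos, value) == (2, 300)

assert decode_fixed_32(struct.pack("<L", 1234)) == 1234
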
wlra/vae.py | aksarkar/wlra | 7 | 6633085 | import torch
class Encoder(torch.nn.Module):
"""Encoder q(z | x) = N(mu(x), sigma^2(x))"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(input_dim, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 128),
torch.nn.ReLU(),
)
self.mean = torch.nn.Linear(128, output_dim)
self.scale = torch.nn.Sequential(torch.nn.Linear(128, output_dim), torch.nn.Softplus())
def forward(self, x):
q = self.net(x)
return self.mean(q), self.scale(q)
class Pois(torch.nn.Module):
"""Decoder p(x | z) ~ Poisson(s_i lambda(z))"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.lam = torch.nn.Sequential(
torch.nn.Linear(input_dim, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, output_dim),
torch.nn.Softplus(),
)
def forward(self, x):
return self.lam(x)
def kl_term(mean, scale):
"""KL divergence between N(mean, scale) and N(0, 1)"""
return .5 * (1 - 2 * torch.log(scale) + (mean * mean + scale * scale))
def pois_llik(x, mean):
"""Log likelihood of x distributed as Poisson"""
return x * torch.log(mean) - mean - torch.lgamma(x + 1)
class PVAE(torch.nn.Module):
def __init__(self, input_dim, latent_dim):
super().__init__()
self.encoder = Encoder(input_dim, latent_dim)
self.decoder = Pois(latent_dim, input_dim)
def loss(self, x, s, stoch_samples):
mean, scale = self.encoder.forward(x)
# [batch_size]
# Important: this is analytic
kl = torch.sum(kl_term(mean, scale), dim=1)
# [stoch_samples, batch_size, latent_dim]
qz = torch.distributions.Normal(mean, scale).rsample(stoch_samples)
# [stoch_samples, batch_size, input_dim]
lam = self.decoder.forward(qz)
error = torch.mean(torch.sum(pois_llik(x, lam), dim=2), dim=0)
# Important: optim minimizes
loss = -torch.sum(error - kl)
return loss
def fit(self, x, s, max_epochs, verbose=False, stoch_samples=10, **kwargs):
"""Fit the model
:param x: torch.tensor [n_cells, n_genes]
:param s: torch.tensor [n_cells, 1]
"""
if torch.cuda.is_available():
# Move the model and data to the GPU
self.cuda()
x = x.cuda()
s = s.cuda()
stoch_samples = torch.Size([stoch_samples])
opt = torch.optim.RMSprop(self.parameters(), **kwargs)
for epoch in range(max_epochs):
opt.zero_grad()
loss = self.loss(x, s, stoch_samples)
if torch.isnan(loss):
return self
loss.backward()
opt.step()
if verbose and not epoch % 10:
print(f'[epoch={epoch}] elbo={-loss}')
return self
@torch.no_grad()
def denoise(self, x):
if torch.cuda.is_available():
x = x.cuda()
# Plug E[z | x] into the decoder
lam = self.decoder.forward(self.encoder.forward(x)[0])
if torch.cuda.is_available():
lam = lam.cpu()
return lam.numpy()
| import torch
class Encoder(torch.nn.Module):
"""Encoder q(z | x) = N(mu(x), sigma^2(x))"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.net = torch.nn.Sequential(
torch.nn.Linear(input_dim, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 128),
torch.nn.ReLU(),
)
self.mean = torch.nn.Linear(128, output_dim)
self.scale = torch.nn.Sequential(torch.nn.Linear(128, output_dim), torch.nn.Softplus())
def forward(self, x):
q = self.net(x)
return self.mean(q), self.scale(q)
class Pois(torch.nn.Module):
"""Decoder p(x | z) ~ Poisson(s_i lambda(z))"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.lam = torch.nn.Sequential(
torch.nn.Linear(input_dim, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, output_dim),
torch.nn.Softplus(),
)
def forward(self, x):
return self.lam(x)
def kl_term(mean, scale):
"""KL divergence between N(mean, scale) and N(0, 1)"""
return .5 * (1 - 2 * torch.log(scale) + (mean * mean + scale * scale))
def pois_llik(x, mean):
"""Log likelihood of x distributed as Poisson"""
return x * torch.log(mean) - mean - torch.lgamma(x + 1)
class PVAE(torch.nn.Module):
def __init__(self, input_dim, latent_dim):
super().__init__()
self.encoder = Encoder(input_dim, latent_dim)
self.decoder = Pois(latent_dim, input_dim)
def loss(self, x, s, stoch_samples):
mean, scale = self.encoder.forward(x)
# [batch_size]
# Important: this is analytic
kl = torch.sum(kl_term(mean, scale), dim=1)
# [stoch_samples, batch_size, latent_dim]
qz = torch.distributions.Normal(mean, scale).rsample(stoch_samples)
# [stoch_samples, batch_size, input_dim]
lam = self.decoder.forward(qz)
error = torch.mean(torch.sum(pois_llik(x, lam), dim=2), dim=0)
# Important: optim minimizes
loss = -torch.sum(error - kl)
return loss
def fit(self, x, s, max_epochs, verbose=False, stoch_samples=10, **kwargs):
"""Fit the model
:param x: torch.tensor [n_cells, n_genes]
:param s: torch.tensor [n_cells, 1]
"""
if torch.cuda.is_available():
# Move the model and data to the GPU
self.cuda()
x = x.cuda()
s = s.cuda()
stoch_samples = torch.Size([stoch_samples])
opt = torch.optim.RMSprop(self.parameters(), **kwargs)
for epoch in range(max_epochs):
opt.zero_grad()
loss = self.loss(x, s, stoch_samples)
if torch.isnan(loss):
return self
loss.backward()
opt.step()
if verbose and not epoch % 10:
print(f'[epoch={epoch}] elbo={-loss}')
return self
@torch.no_grad()
def denoise(self, x):
if torch.cuda.is_available():
x = x.cuda()
# Plug E[z | x] into the decoder
lam = self.decoder.forward(self.encoder.forward(x)[0])
if torch.cuda.is_available():
lam = lam.cpu()
return lam.numpy()
| en | 0.6906 | Encoder q(z | x) = N(mu(x), sigma^2(x)) Decoder p(x | z) ~ Poisson(s_i lambda(z)) KL divergence between N(mean, scale) and N(0, 1) Log likelihood of x distributed as Poisson # [batch_size] # Important: this is analytic # [stoch_samples, batch_size, latent_dim] # [stoch_samples, batch_size, input_dim] # Important: optim minimizes Fit the model :param x: torch.tensor [n_cells, n_genes] :param s: torch.tensor [n_cells, 1] # Move the model and data to the GPU # Plug E[z | x] into the decoder | 2.969267 | 3 |
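
A quick numerical sanity check of pois_llik against the closed-form Poisson log-pmf (illustrative only; the values are arbitrary):

import math
import torch

x, lam = torch.tensor(2.0), torch.tensor(3.0)
expected = -3.0 + 2.0 * math.log(3.0) - math.log(2.0)  # log(e**-3 * 3**2 / 2!)
assert torch.isclose(pois_llik(x, lam), torch.tensor(expected))
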
kreator/controllers/pyramid.py | fearless-spider/kreator-cli | 0 | 6633086 | <reponame>fearless-spider/kreator-cli<gh_stars>0
from cement import Controller, ex
class Pyramid(Controller):
class Meta:
label = 'Pyramid'
stacked_on = 'base'
stacked_type = 'embedded'
# text displayed at the top of --help output
description = 'This component generate pyramid app'
# text displayed at the bottom of --help output
epilog = 'Usage: kreator pyramid --config kreator.yml'
def _default(self):
"""Default action if no sub-command is passed."""
self.app.args.print_help()
@ex(
help='kreator pyramid -c kreator.yml',
# sub-command level arguments. ex: 'kreator command1 --foo bar'
arguments=[
            ### add the -c/--config option under the subcommand namespace
(['-c', '--config'],
{'help': 'Config file to new pyramid app',
'action': 'store',
'dest': 'config'})
],
)
def pyramid(self):
"""Pyramid sub-command."""
data = {
}
if self.app.pargs.config is not None:
data['config'] = self.app.pargs.config
self.app.render(data, 'pyramid.jinja2')
| from cement import Controller, ex
class Pyramid(Controller):
class Meta:
label = 'Pyramid'
stacked_on = 'base'
stacked_type = 'embedded'
# text displayed at the top of --help output
description = 'This component generate pyramid app'
# text displayed at the bottom of --help output
epilog = 'Usage: kreator pyramid --config kreator.yml'
def _default(self):
"""Default action if no sub-command is passed."""
self.app.args.print_help()
@ex(
help='kreator pyramid -c kreator.yml',
# sub-command level arguments. ex: 'kreator command1 --foo bar'
arguments=[
            ### add the -c/--config option under the subcommand namespace
(['-c', '--config'],
{'help': 'Config file to new pyramid app',
'action': 'store',
'dest': 'config'})
],
)
def pyramid(self):
"""Pyramid sub-command."""
data = {
}
if self.app.pargs.config is not None:
data['config'] = self.app.pargs.config
self.app.render(data, 'pyramid.jinja2') | en | 0.422539 | # text displayed at the top of --help output # text displayed at the bottom of --help output Default action if no sub-command is passed. # sub-command level arguments. ex: 'kreator command1 --foo bar' ### add a sample foo option under subcommand namespace Pyramid sub-command. | 2.657121 | 3 |
ttsx/goods/urls.py | GaoHaiBin/freshtest | 0 | 6633087 | from django.conf.urls import url
from freshtest.ttsx.goods import views
urlpatterns = [
url(r'^', views)
] | from django.conf.urls import url
from freshtest.ttsx.goods import views
urlpatterns = [
url(r'^', views)
] | none | 1 | 1.375737 | 1 |
|
armory/baseline_models/pytorch/sincnet.py | paperwhite/armory | 0 | 6633088 | """
CNN model for raw audio classification
Model contributed by: MITRE Corporation
Adapted from: https://github.com/mravanelli/SincNet
"""
import logging
from art.classifiers import PyTorchClassifier
import numpy as np
import torch
from torch import nn
# Load model from MITRE external repo: https://github.com/hkakitani/SincNet
# This needs to be defined in your config's `external_github_repo` field to be
# downloaded and placed on the PYTHONPATH
from SincNet import dnn_models
logger = logging.getLogger(__name__)
# NOTE: Underlying dataset sample rate is 16 kHz. SincNet uses this SAMPLE_RATE to
# determine internal filter high cutoff frequency.
SAMPLE_RATE = 8000
WINDOW_STEP_SIZE = 375
WINDOW_LENGTH = int(SAMPLE_RATE * WINDOW_STEP_SIZE / 1000)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def numpy_random_preprocessing_fn(batch):
"""
Standardize, then normalize sound clips
Then generate a random cut of the input
"""
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float32)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# make a pseudorandom cut of size equal to WINDOW_LENGTH
# (from SincNet's create_batches_rnd)
signal_length = len(signal)
np.random.seed(signal_length)
signal_start = int(
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch)
def numpy_all_preprocessing_fn(batch):
"""
Input is comprised of one or more clips, where each clip i
is given as an ndarray with shape (n_i,).
Preprocessing normalizes each clip and breaks each clip into an integer number
of non-overlapping segments of length WINDOW_LENGTH.
Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH)
"""
if len(batch) != 1:
raise NotImplementedError(
"Requires ART variable length input capability for batch size != 1"
)
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float64)
signal = signal / np.max(np.abs(signal))
# break into a number of chunks of equal length
num_chunks = int(len(signal) / WINDOW_LENGTH)
signal = signal[: num_chunks * WINDOW_LENGTH]
signal = np.reshape(signal, (num_chunks, WINDOW_LENGTH), order="C")
processed_batch.append(signal)
# remove outer batch (of size 1)
processed_batch = processed_batch[0]
return np.array(processed_batch)
def torch_random_preprocessing_fn(x):
"""
Standardize, then normalize sound clips
"""
if x.shape[0] != 1:
raise ValueError(f"Shape of batch x {x.shape[0]} != 1")
if x.dtype != torch.float32:
raise ValueError(f"dtype of batch x {x.dtype} != torch.float32")
if x.max() > 1.0:
raise ValueError(f"batch x max {x.max()} > 1.0")
if x.min() < -1.0:
raise ValueError(f"batch x min {x.min()} < -1.0")
x = x.squeeze(0)
# Signal normalization
x = x / x.abs().max()
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(x)
np.random.seed(signal_length)
start = int(
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
x = x[start : start + WINDOW_LENGTH]
x = x.unsqueeze(0)
return x
def torch_all_preprocessing_fn(x):
"""
Input is comprised of one or more clips, where each clip i
is given as an ndarray with shape (n_i,).
Preprocessing normalizes each clip and breaks each clip into an integer number
of non-overlapping segments of length WINDOW_LENGTH.
Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH)
"""
if x.shape[0] != 1:
raise NotImplementedError(
"Requires ART variable length input capability for batch size != 1"
)
if x.max() > 1.0:
raise ValueError(f"batch x max {x.max()} > 1.0")
if x.min() < -1.0:
raise ValueError(f"batch x min {x.min()} < -1.0")
if x.dtype != torch.float32:
raise ValueError(f"dtype of batch x {x.dtype} != torch.float32")
x = x.squeeze(0)
# Signal normalization
x = x / x.abs().max()
# break into a number of chunks of equal length
num_chunks = int(len(x) / WINDOW_LENGTH)
x = x[: num_chunks * WINDOW_LENGTH]
x = x.reshape((num_chunks, WINDOW_LENGTH))
return x
def sincnet(weights_path=None):
pretrained = weights_path is not None
if pretrained:
model_params = torch.load(weights_path, map_location=DEVICE)
else:
model_params = {}
CNN_params = model_params.get("CNN_model_par")
DNN1_params = model_params.get("DNN1_model_par")
DNN2_params = model_params.get("DNN2_model_par")
# from SincNet/cfg/SincNet_dev_LibriSpeech.cfg
cnn_N_filt = [80, 60, 60]
cnn_len_filt = [251, 5, 5]
cnn_max_pool_len = [3, 3, 3]
cnn_use_laynorm_inp = True
cnn_use_batchnorm_inp = False
cnn_use_laynorm = [True, True, True]
cnn_use_batchnorm = [False, False, False]
cnn_act = ["relu", "relu", "relu"]
cnn_drop = [0.0, 0.0, 0.0]
fc_lay = [2048, 2048, 2048]
fc_drop = [0.0, 0.0, 0.0]
fc_use_laynorm_inp = True
fc_use_batchnorm_inp = False
fc_use_batchnorm = [True, True, True]
fc_use_laynorm = [False, False, False]
fc_act = ["leaky_relu", "linear", "leaky_relu"]
class_lay = [40]
class_drop = [0.0, 0.0]
class_use_laynorm_inp = True
class_use_batchnorm_inp = False
class_use_batchnorm = [False]
class_use_laynorm = [False]
class_act = ["softmax"]
CNN_options = {
"input_dim": WINDOW_LENGTH,
"fs": SAMPLE_RATE,
"cnn_N_filt": cnn_N_filt,
"cnn_len_filt": cnn_len_filt,
"cnn_max_pool_len": cnn_max_pool_len,
"cnn_use_laynorm_inp": cnn_use_laynorm_inp,
"cnn_use_batchnorm_inp": cnn_use_batchnorm_inp,
"cnn_use_laynorm": cnn_use_laynorm,
"cnn_use_batchnorm": cnn_use_batchnorm,
"cnn_act": cnn_act,
"cnn_drop": cnn_drop,
"pretrained": pretrained,
"model_params": CNN_params,
}
DNN1_options = {
"fc_lay": fc_lay,
"fc_drop": fc_drop,
"fc_use_batchnorm": fc_use_batchnorm,
"fc_use_laynorm": fc_use_laynorm,
"fc_use_laynorm_inp": fc_use_laynorm_inp,
"fc_use_batchnorm_inp": fc_use_batchnorm_inp,
"fc_act": fc_act,
"pretrained": pretrained,
"model_params": DNN1_params,
}
DNN2_options = {
"input_dim": fc_lay[-1],
"fc_lay": class_lay,
"fc_drop": class_drop,
"fc_use_batchnorm": class_use_batchnorm,
"fc_use_laynorm": class_use_laynorm,
"fc_use_laynorm_inp": class_use_laynorm_inp,
"fc_use_batchnorm_inp": class_use_batchnorm_inp,
"fc_act": class_act,
}
sincNet = dnn_models.SincWrapper(DNN2_options, DNN1_options, CNN_options)
if pretrained:
sincNet.eval()
sincNet.load_state_dict(DNN2_params)
else:
sincNet.train()
return sincNet
class SincNetWrapper(nn.Module):
MODES = {
"random": torch_random_preprocessing_fn,
"all": torch_all_preprocessing_fn,
}
def __init__(self, model_kwargs, weights_path):
super().__init__()
predict_mode = model_kwargs.pop("predict_mode", "all")
if predict_mode not in self.MODES:
raise ValueError(f"predict_mode {predict_mode} not in {tuple(self.MODES)}")
self.predict_mode = predict_mode
self.model = sincnet(weights_path=weights_path, **model_kwargs)
self.model.to(DEVICE)
def forward(self, x):
if self.training:
# preprocessing should be done before model for arbitrary length input
return self.model(x)
x = self.MODES[self.predict_mode](x)
output = self.model(x)
if self.predict_mode == "all":
output = torch.mean(output, dim=0, keepdim=True)
return output
preprocessing_fn = numpy_random_preprocessing_fn
def get_art_model(model_kwargs, wrapper_kwargs, weights_path=None):
model = SincNetWrapper(model_kwargs, weights_path)
model.to(DEVICE)
wrapped_model = PyTorchClassifier(
model,
loss=torch.nn.NLLLoss(),
optimizer=torch.optim.RMSprop(
model.parameters(), lr=0.001, alpha=0.95, eps=1e-8
),
input_shape=(None,),
nb_classes=40,
**wrapper_kwargs,
)
return wrapped_model
| """
CNN model for raw audio classification
Model contributed by: MITRE Corporation
Adapted from: https://github.com/mravanelli/SincNet
"""
import logging
from art.classifiers import PyTorchClassifier
import numpy as np
import torch
from torch import nn
# Load model from MITRE external repo: https://github.com/hkakitani/SincNet
# This needs to be defined in your config's `external_github_repo` field to be
# downloaded and placed on the PYTHONPATH
from SincNet import dnn_models
logger = logging.getLogger(__name__)
# NOTE: Underlying dataset sample rate is 16 kHz. SincNet uses this SAMPLE_RATE to
# determine internal filter high cutoff frequency.
SAMPLE_RATE = 8000
WINDOW_STEP_SIZE = 375
WINDOW_LENGTH = int(SAMPLE_RATE * WINDOW_STEP_SIZE / 1000)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def numpy_random_preprocessing_fn(batch):
"""
Standardize, then normalize sound clips
Then generate a random cut of the input
"""
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float32)
# Signal normalization
signal = signal / np.max(np.abs(signal))
# make a pseudorandom cut of size equal to WINDOW_LENGTH
# (from SincNet's create_batches_rnd)
signal_length = len(signal)
np.random.seed(signal_length)
signal_start = int(
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
signal_stop = signal_start + WINDOW_LENGTH
signal = signal[signal_start:signal_stop]
processed_batch.append(signal)
return np.array(processed_batch)
def numpy_all_preprocessing_fn(batch):
"""
Input is comprised of one or more clips, where each clip i
is given as an ndarray with shape (n_i,).
Preprocessing normalizes each clip and breaks each clip into an integer number
of non-overlapping segments of length WINDOW_LENGTH.
Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH)
"""
if len(batch) != 1:
raise NotImplementedError(
"Requires ART variable length input capability for batch size != 1"
)
processed_batch = []
for clip in batch:
# convert and normalize
signal = clip.astype(np.float64)
signal = signal / np.max(np.abs(signal))
# break into a number of chunks of equal length
num_chunks = int(len(signal) / WINDOW_LENGTH)
signal = signal[: num_chunks * WINDOW_LENGTH]
signal = np.reshape(signal, (num_chunks, WINDOW_LENGTH), order="C")
processed_batch.append(signal)
# remove outer batch (of size 1)
processed_batch = processed_batch[0]
return np.array(processed_batch)
def torch_random_preprocessing_fn(x):
"""
Standardize, then normalize sound clips
"""
if x.shape[0] != 1:
raise ValueError(f"Shape of batch x {x.shape[0]} != 1")
if x.dtype != torch.float32:
raise ValueError(f"dtype of batch x {x.dtype} != torch.float32")
if x.max() > 1.0:
raise ValueError(f"batch x max {x.max()} > 1.0")
if x.min() < -1.0:
raise ValueError(f"batch x min {x.min()} < -1.0")
x = x.squeeze(0)
# Signal normalization
x = x / x.abs().max()
# get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
signal_length = len(x)
np.random.seed(signal_length)
start = int(
np.random.randint(signal_length / WINDOW_LENGTH - 1)
* WINDOW_LENGTH
% signal_length
)
x = x[start : start + WINDOW_LENGTH]
x = x.unsqueeze(0)
return x
def torch_all_preprocessing_fn(x):
"""
Input is comprised of one or more clips, where each clip i
is given as an ndarray with shape (n_i,).
Preprocessing normalizes each clip and breaks each clip into an integer number
of non-overlapping segments of length WINDOW_LENGTH.
Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH)
"""
if x.shape[0] != 1:
raise NotImplementedError(
"Requires ART variable length input capability for batch size != 1"
)
if x.max() > 1.0:
raise ValueError(f"batch x max {x.max()} > 1.0")
if x.min() < -1.0:
raise ValueError(f"batch x min {x.min()} < -1.0")
if x.dtype != torch.float32:
raise ValueError(f"dtype of batch x {x.dtype} != torch.float32")
x = x.squeeze(0)
# Signal normalization
x = x / x.abs().max()
# break into a number of chunks of equal length
num_chunks = int(len(x) / WINDOW_LENGTH)
x = x[: num_chunks * WINDOW_LENGTH]
x = x.reshape((num_chunks, WINDOW_LENGTH))
return x
def sincnet(weights_path=None):
pretrained = weights_path is not None
if pretrained:
model_params = torch.load(weights_path, map_location=DEVICE)
else:
model_params = {}
CNN_params = model_params.get("CNN_model_par")
DNN1_params = model_params.get("DNN1_model_par")
DNN2_params = model_params.get("DNN2_model_par")
# from SincNet/cfg/SincNet_dev_LibriSpeech.cfg
cnn_N_filt = [80, 60, 60]
cnn_len_filt = [251, 5, 5]
cnn_max_pool_len = [3, 3, 3]
cnn_use_laynorm_inp = True
cnn_use_batchnorm_inp = False
cnn_use_laynorm = [True, True, True]
cnn_use_batchnorm = [False, False, False]
cnn_act = ["relu", "relu", "relu"]
cnn_drop = [0.0, 0.0, 0.0]
fc_lay = [2048, 2048, 2048]
fc_drop = [0.0, 0.0, 0.0]
fc_use_laynorm_inp = True
fc_use_batchnorm_inp = False
fc_use_batchnorm = [True, True, True]
fc_use_laynorm = [False, False, False]
fc_act = ["leaky_relu", "linear", "leaky_relu"]
class_lay = [40]
class_drop = [0.0, 0.0]
class_use_laynorm_inp = True
class_use_batchnorm_inp = False
class_use_batchnorm = [False]
class_use_laynorm = [False]
class_act = ["softmax"]
CNN_options = {
"input_dim": WINDOW_LENGTH,
"fs": SAMPLE_RATE,
"cnn_N_filt": cnn_N_filt,
"cnn_len_filt": cnn_len_filt,
"cnn_max_pool_len": cnn_max_pool_len,
"cnn_use_laynorm_inp": cnn_use_laynorm_inp,
"cnn_use_batchnorm_inp": cnn_use_batchnorm_inp,
"cnn_use_laynorm": cnn_use_laynorm,
"cnn_use_batchnorm": cnn_use_batchnorm,
"cnn_act": cnn_act,
"cnn_drop": cnn_drop,
"pretrained": pretrained,
"model_params": CNN_params,
}
DNN1_options = {
"fc_lay": fc_lay,
"fc_drop": fc_drop,
"fc_use_batchnorm": fc_use_batchnorm,
"fc_use_laynorm": fc_use_laynorm,
"fc_use_laynorm_inp": fc_use_laynorm_inp,
"fc_use_batchnorm_inp": fc_use_batchnorm_inp,
"fc_act": fc_act,
"pretrained": pretrained,
"model_params": DNN1_params,
}
DNN2_options = {
"input_dim": fc_lay[-1],
"fc_lay": class_lay,
"fc_drop": class_drop,
"fc_use_batchnorm": class_use_batchnorm,
"fc_use_laynorm": class_use_laynorm,
"fc_use_laynorm_inp": class_use_laynorm_inp,
"fc_use_batchnorm_inp": class_use_batchnorm_inp,
"fc_act": class_act,
}
sincNet = dnn_models.SincWrapper(DNN2_options, DNN1_options, CNN_options)
if pretrained:
sincNet.eval()
sincNet.load_state_dict(DNN2_params)
else:
sincNet.train()
return sincNet
class SincNetWrapper(nn.Module):
MODES = {
"random": torch_random_preprocessing_fn,
"all": torch_all_preprocessing_fn,
}
def __init__(self, model_kwargs, weights_path):
super().__init__()
predict_mode = model_kwargs.pop("predict_mode", "all")
if predict_mode not in self.MODES:
raise ValueError(f"predict_mode {predict_mode} not in {tuple(self.MODES)}")
self.predict_mode = predict_mode
self.model = sincnet(weights_path=weights_path, **model_kwargs)
self.model.to(DEVICE)
def forward(self, x):
if self.training:
# preprocessing should be done before model for arbitrary length input
return self.model(x)
x = self.MODES[self.predict_mode](x)
output = self.model(x)
if self.predict_mode == "all":
output = torch.mean(output, dim=0, keepdim=True)
return output
preprocessing_fn = numpy_random_preprocessing_fn
def get_art_model(model_kwargs, wrapper_kwargs, weights_path=None):
model = SincNetWrapper(model_kwargs, weights_path)
model.to(DEVICE)
wrapped_model = PyTorchClassifier(
model,
loss=torch.nn.NLLLoss(),
optimizer=torch.optim.RMSprop(
model.parameters(), lr=0.001, alpha=0.95, eps=1e-8
),
input_shape=(None,),
nb_classes=40,
**wrapper_kwargs,
)
return wrapped_model
| en | 0.802542 | CNN model for raw audio classification Model contributed by: MITRE Corporation Adapted from: https://github.com/mravanelli/SincNet # Load model from MITRE external repo: https://github.com/hkakitani/SincNet # This needs to be defined in your config's `external_github_repo` field to be # downloaded and placed on the PYTHONPATH # NOTE: Underlying dataset sample rate is 16 kHz. SincNet uses this SAMPLE_RATE to # determine internal filter high cutoff frequency. Standardize, then normalize sound clips Then generate a random cut of the input # convert and normalize # Signal normalization # make a pseudorandom cut of size equal to WINDOW_LENGTH # (from SincNet's create_batches_rnd) Input is comprised of one or more clips, where each clip i is given as an ndarray with shape (n_i,). Preprocessing normalizes each clip and breaks each clip into an integer number of non-overlapping segments of length WINDOW_LENGTH. Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH) # convert and normalize # break into a number of chunks of equal length # remove outer batch (of size 1) Standardize, then normalize sound clips # Signal normalization # get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd) Input is comprised of one or more clips, where each clip i is given as an ndarray with shape (n_i,). Preprocessing normalizes each clip and breaks each clip into an integer number of non-overlapping segments of length WINDOW_LENGTH. Output is a list of clips, each of shape (int(n_i/WINDOW_LENGTH), WINDOW_LENGTH) # Signal normalization # break into a number of chunks of equal length # from SincNet/cfg/SincNet_dev_LibriSpeech.cfg # preprocessing should be done before model for arbitrary length input | 2.674443 | 3 |
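
Shape sketch for the preprocessing above (synthetic clip, arbitrary length): with SAMPLE_RATE = 8000 and WINDOW_STEP_SIZE = 375, WINDOW_LENGTH comes out to 3000 samples, so a 7000-sample clip yields two non-overlapping chunks in 'all' mode.

import numpy as np

clip = np.random.uniform(-1.0, 1.0, size=7000).astype(np.float32)
chunks = numpy_all_preprocessing_fn([clip])
assert chunks.shape == (2, 3000)  # int(7000 / 3000) == 2 chunks of WINDOW_LENGTH
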
riddle_bot_source/userinfo.py | anthonysasso2001/Python-Riddle-Bot | 0 | 6633089 | from dataclasses import dataclass
import pickle
@dataclass
class User:
"""Class for user struct / class"""
name: str = "-1"
    password: str = "<PASSWORD>"
fibonacciScore: int = 0
minesweeperScore: int = 0
def save_user_data(file_name, users):
    # stub: persisting the users (e.g. pickling them to file_name) is not implemented yet
    return
def load_user_data(file_name):
return
| from dataclasses import dataclass
import pickle
@dataclass
class User:
"""Class for user struct / class"""
name: str = "-1"
    password: str = "<PASSWORD>"
fibonacciScore: int = 0
minesweeperScore: int = 0
def save_user_data(file_name, users):
    # stub: persisting the users (e.g. pickling them to file_name) is not implemented yet
    return
def load_user_data(file_name):
return
| en | 0.927561 | Class for user struct / class # | 3.179976 | 3 |
validator/sawtooth_validator/execution/scheduler_parallel.py | manojgop/sawtooth-core | 0 | 6633090 | <filename>validator/sawtooth_validator/execution/scheduler_parallel.py
# Copyright 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from itertools import filterfalse
from threading import Condition
import logging
from collections import deque
from collections import namedtuple
from collections import OrderedDict
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_validator.execution.scheduler import BatchExecutionResult
from sawtooth_validator.execution.scheduler import TxnExecutionResult
from sawtooth_validator.execution.scheduler import TxnInformation
from sawtooth_validator.execution.scheduler import Scheduler
from sawtooth_validator.execution.scheduler import SchedulerIterator
from sawtooth_validator.execution.scheduler_exceptions import SchedulerError
LOGGER = logging.getLogger(__name__)
_AnnotatedBatch = namedtuple('ScheduledBatch',
['batch', 'required', 'preserve'])
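# Added note: 'required' flags batches the caller does not allow to be dropped;
# 'preserve' flags batches that unschedule_incomplete_batches() keeps even while
# they are incomplete (add_batch derives it from 'required').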
class AddressNotInTree(Exception):
def __init__(self, match=None):
super().__init__()
self.match = match
class Node:
def __init__(self, address, data=None):
self.address = address
self.children = set()
self.data = data
class Tree:
'''
This tree is a prefix tree: a node's address is always a strict
prefix of the addresses of its children, and every node either has
data or has multiple children.
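    Illustrative example (added): after
        t = Tree(); t.update('ab', lambda _: 1); t.update('abc', lambda _: 2)
    list(t.walk('ab')) yields ('', None), ('ab', 1), ('abc', 2).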
'''
def __init__(self):
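        # The root carries the empty address, so every other address is a
        # descendant of it.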
self._root = Node('')
def _get_child(self, node, address):
for child in node.children:
if address.startswith(child.address):
return child
match = None
for child in node.children:
if child.address.startswith(address):
match = child.address
raise AddressNotInTree(match=match)
def _walk_to_address(self, address):
node = self._root
yield node
# A node's address is always a proper prefix of the addresses
# of its children, so we only need to check the ordering. A
# more explicit but also more verbose and probably slower
# check would be:
#
# while address != node.address and address.startswith(node.address):
#
while node.address < address:
node = self._get_child(node, address)
yield node
def update(self, address, updater, prune=False):
'''
Walk to ADDRESS, creating nodes if necessary, and set the data
there to UPDATER(data).
Arguments:
address (str): the address to be updated
'''
node = self._get_or_create(address)
node.data = updater(node.data)
if prune:
node.children.clear()
def prune(self, address):
'''
Remove all children (and descendants) below ADDRESS.
Arguments:
address (str): the address to be pruned
'''
try:
for step in self._walk_to_address(address):
node = step
except AddressNotInTree:
return
node.children.clear()
def walk(self, address):
'''
Returns a stream of pairs of node addresses and data, raising
AddressNotInTree if ADDRESS is not in the tree.
First the ancestors of ADDRESS (including itself) are yielded,
earliest to latest, and then the descendants of ADDRESS are
yielded in an unspecified order.
Arguments:
address (str): the address to be walked
'''
for step in self._walk_to_address(address):
node = step
yield node.address, node.data
to_process = deque()
to_process.extendleft(
node.children)
while to_process:
node = to_process.pop()
yield node.address, node.data
if node.children:
to_process.extendleft(
node.children)
def _get_or_create(self, address):
# Walk as far down the tree as possible. If the desired
# address is reached, return that node. Otherwise, add a new
# one.
try:
for step in self._walk_to_address(address):
node = step
return node
except AddressNotInTree:
# The rest of the function deals with adding a new node,
# but there's no sense adding a level of indentation, so
# just pass here.
pass
# The address isn't in the tree, so a new node will be added
# one way or another.
new_node = Node(address)
# Try to get the next child with a matching prefix.
try:
prefix_len = len(node.address)
match = next(
child
for child in node.children
if child.address[prefix_len:].startswith(
address[prefix_len:][0])
)
# There's no match, so just add the new address as a child.
except StopIteration:
node.children.add(new_node)
return new_node
# If node address is 'rustic' and the address being added is
# 'rust', then 'rust' will be the intermediate node taking
# 'rustic' as a child.
if match.address.startswith(address):
node.children.add(new_node)
new_node.children.add(match)
node.children.remove(match)
return new_node
# The address and the match address share a common prefix, so
# an intermediate node with the prefix as its address will
# take them both as children.
shorter = (
address
if len(address) < len(match.address)
else match.address
)
for i in range(1, len(shorter)):
if address[i] != match.address[i]:
prefix = shorter[:i]
break
intermediate_node = Node(prefix)
intermediate_node.children.update((new_node, match))
node.children.add(intermediate_node)
node.children.remove(match)
return new_node
class Predecessors:
def __init__(self, readers, writer):
self.readers = readers
self.writer = writer
class PredecessorTree:
def __init__(self):
self._tree = Tree()
def add_reader(self, address, reader):
def updater(data):
if data is None:
return Predecessors(readers={reader}, writer=None)
data.readers.add(reader)
return data
self._tree.update(address, updater)
def set_writer(self, address, writer):
def updater(data):
if data is None:
return Predecessors(readers=set(), writer=writer)
data.writer = writer
data.readers.clear()
return data
self._tree.update(address, updater, prune=True)
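        # Added note: prune=True drops the subtree below this address, since a
        # write here supersedes any reader/writer records stored underneath it.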
def find_write_predecessors(self, address):
"""Returns all predecessor transaction ids for a write of the provided
address.
Arguments:
address (str): the radix address
Returns: a set of transaction ids
"""
# A write operation must be preceded by:
# - The "enclosing writer", which is the writer at the address or
# the nearest writer higher (closer to the root) in the tree.
# - The "enclosing readers", which are the readers at the address
# or higher in the tree.
# - The "children writers", which include all writers which are
# lower in the tree than the address.
# - The "children readers", which include all readers which are
# lower in the tree than the address.
#
# The enclosing writer must be added as it may have modified a node
# which must not happen after the current write.
#
# Writers which are higher in the tree than the enclosing writer may
# have modified a node at or under the given address. However, we do
# not need to include them here as they will have been considered a
# predecessor to the enclosing writer.
#
# Enclosing readers must be included. Technically, we only need to add
# enclosing readers which occurred after the enclosing writer, since
# the readers preceding the writer will have been considered a
# predecessor of the enclosing writer. However, with the current
# data structure we can not determine the difference between readers
# so we specify them all; this is mostly harmless as it will not change
# the eventual sort order generated by the scheduler.
#
# Children readers must be added, since their reads must happen prior
# to the write.
predecessors = set()
enclosing_writer = None
node_stream = self._tree.walk(address)
address_len = len(address)
# First, walk down from the root to the address, collecting all readers
# and updating the enclosing_writer if needed.
try:
for node_address, node in node_stream:
if node is not None:
predecessors.update(node.readers)
if node.writer is not None:
enclosing_writer = node.writer
if len(node_address) >= address_len:
break
# If the address isn't on the tree, then there aren't any
# predecessors below the node to worry about (because there
# isn't anything at all), so return the predecessors that have
# already been collected.
except AddressNotInTree as err:
if err.match is not None:
return self.find_write_predecessors(err.match)
return predecessors
finally:
if enclosing_writer is not None:
predecessors.add(enclosing_writer)
# Next, descend down the tree starting at the address node and
# find all descendant readers and writers.
for _, node in node_stream:
if node is not None:
if node.writer is not None:
predecessors.add(node.writer)
predecessors.update(node.readers)
return predecessors
def find_read_predecessors(self, address):
"""Returns all predecessor transaction ids for a read of the provided
address.
Arguments:
address (str): the radix address
Returns: a set of transaction ids
"""
# A read operation must be preceded by:
# - The "enclosing writer", which is the writer at the address or
# the nearest writer higher (closer to the root) in the tree.
# - All "children writers", which include all writers which are
# lower in the tree than the address.
#
# The enclosing writer must be added as it is possible it updated the
# contents stored at address.
#
# Writers which are higher in the tree than the enclosing writer may
# have modified the address. However, we do not need to include them
# here as they will have been considered a predecessor to the enclosing
# writer.
#
# Children writers must be included as they may have updated addresses
# lower in the tree, and these writers will have always been preceded
# by the enclosing writer.
#
# We do not need to add any readers, since a reader cannot impact the
# value which we are reading. The relationship is transitive, in that
# this reader will also not impact the readers already recorded in the
# tree.
predecessors = set()
enclosing_writer = None
node_stream = self._tree.walk(address)
address_len = len(address)
# First, walk down from the root to the address, updating the
# enclosing_writer if needed.
try:
for node_address, node in node_stream:
if node is not None:
if node.writer is not None:
enclosing_writer = node.writer
if len(node_address) >= address_len:
break
# If the address isn't on the tree, then there aren't any
# predecessors below the node to worry about (because there
# isn't anything at all), so return the predecessors that have
# already been collected.
except AddressNotInTree as err:
if err.match is not None:
return self.find_read_predecessors(err.match)
return predecessors
finally:
if enclosing_writer is not None:
predecessors.add(enclosing_writer)
# Next, descend down the tree starting at the address node and
# find all descendant writers.
for _, node in node_stream:
if node is not None:
if node.writer is not None:
predecessors.add(node.writer)
return predecessors
class PredecessorChain:
def __init__(self):
self._predecessors_by_id = dict()
def add_relationship(self, txn_id, predecessors):
"""Add a predecessor-successor relationship between one txn id and
a set of predecessors.
Args:
txn_id (str): The transaction id of the transaction.
predecessors (set): The transaction ids of the
transaction's predecessors
Returns:
None
"""
all_pred = set(predecessors)
for pred in predecessors:
all_pred.update(self._predecessors_by_id[pred])
self._predecessors_by_id[txn_id] = all_pred
def is_predecessor_of_other(self, predecessor, others):
"""Returns whether the predecessor is a predecessor or a predecessor
of a predecessor...of any of the others.
Args:
predecessor (str): The txn id of the predecessor.
others (list(str)): The txn id of the successor.
Returns:
(bool)
"""
return any(predecessor in self._predecessors_by_id[o] for o in others)
class ParallelScheduler(Scheduler):
def __init__(self, squash_handler, first_state_hash, always_persist):
self._squash = squash_handler
self._first_state_hash = first_state_hash
self._last_state_hash = first_state_hash
self._condition = Condition()
self._predecessor_tree = PredecessorTree()
self._txn_predecessors = {}
self._always_persist = always_persist
self._predecessor_chain = PredecessorChain()
# Transaction identifiers which have been scheduled. Stored as a list,
# since order is important; SchedulerIterator instances, for example,
# must all return scheduled transactions in the same order.
self._scheduled = []
# Transactions that must be replayed but the prior result hasn't
# been returned yet.
self._outstanding = set()
# Batch id for the batch with the property that the batch doesn't have
# all txn results, and all batches prior to it have all their txn
# results.
self._least_batch_id_wo_results = None
# A dict of transaction id to TxnInformation objects, containing all
# transactions present in self._scheduled.
self._scheduled_txn_info = {}
# All batches in their natural order (the order they were added to
# the scheduler.
self._batches = []
# The batches that have state hashes added in add_batch, used in
# Block validation.
self._batches_with_state_hash = {}
# Indexes to find a batch quickly
self._batches_by_id = {}
self._batches_by_txn_id = {}
# Transaction results
self._txn_results = {}
self._txns_available = OrderedDict()
self._transactions = {}
self._cancelled = False
self._final = False
def _find_input_dependencies(self, inputs):
"""Use the predecessor tree to find dependencies based on inputs.
Returns: A list of transaction ids.
"""
dependencies = []
for address in inputs:
dependencies.extend(
self._predecessor_tree.find_read_predecessors(address))
return dependencies
def _find_output_dependencies(self, outputs):
"""Use the predecessor tree to find dependencies based on outputs.
Returns: A list of transaction ids.
"""
dependencies = []
for address in outputs:
dependencies.extend(
self._predecessor_tree.find_write_predecessors(address))
return dependencies
def add_batch(self, batch, state_hash=None, required=False):
with self._condition:
if self._final:
raise SchedulerError('Invalid attempt to add batch to '
'finalized scheduler; batch: {}'
.format(batch.header_signature))
if not self._batches:
self._least_batch_id_wo_results = batch.header_signature
preserve = required
if not required:
# If this is the first non-required batch, it is preserved for
# the schedule to be completed (i.e. no empty schedules in the
# event of unschedule_incomplete_batches being called before
# the first batch is completed).
preserve = _first(
filterfalse(lambda sb: sb.required,
self._batches_by_id.values())) is None
self._batches.append(batch)
self._batches_by_id[batch.header_signature] = \
_AnnotatedBatch(batch, required=required, preserve=preserve)
for txn in batch.transactions:
self._batches_by_txn_id[txn.header_signature] = batch
self._txns_available[txn.header_signature] = txn
self._transactions[txn.header_signature] = txn
if state_hash is not None:
b_id = batch.header_signature
self._batches_with_state_hash[b_id] = state_hash
# For dependency handling: First, we determine our dependencies
# based on the current state of the predecessor tree. Second,
# we update the predecessor tree with reader and writer
# information based on input and outputs.
for txn in batch.transactions:
header = TransactionHeader()
header.ParseFromString(txn.header)
# Calculate predecessors (transaction ids which must come
# prior to the current transaction).
predecessors = self._find_input_dependencies(header.inputs)
predecessors.extend(
self._find_output_dependencies(header.outputs))
txn_id = txn.header_signature
# Update our internal state with the computed predecessors.
self._txn_predecessors[txn_id] = set(predecessors)
self._predecessor_chain.add_relationship(
txn_id=txn_id,
predecessors=predecessors)
# Update the predecessor tree.
#
# Order of reader/writer operations is relevant. A writer
# may overshadow a reader. For example, if the transaction
# has the same input/output address, the end result will be
# this writer (txn.header_signature) stored at the address of
# the predecessor tree. The reader information will have been
# discarded. Write operations to partial addresses will also
# overshadow entire parts of the predecessor tree.
#
# Thus, the order here (inputs then outputs) will cause the
# minimal amount of relevant information to be stored in the
# predecessor tree, with duplicate information being
# automatically discarded by the set_writer() call.
for address in header.inputs:
self._predecessor_tree.add_reader(
address, txn_id)
for address in header.outputs:
self._predecessor_tree.set_writer(
address, txn_id)
self._condition.notify_all()
def _is_explicit_request_for_state_root(self, batch_signature):
return batch_signature in self._batches_with_state_hash
def _is_implicit_request_for_state_root(self, batch_signature):
return self._final and self._is_last_valid_batch(batch_signature)
def _is_valid_batch(self, batch):
for txn in batch.transactions:
if txn.header_signature not in self._txn_results:
raise _UnscheduledTransactionError()
result = self._txn_results[txn.header_signature]
if not result.is_valid:
return False
return True
def _is_last_valid_batch(self, batch_signature):
batch = self._batches_by_id[batch_signature].batch
if not self._is_valid_batch(batch):
return False
index_of_next = self._batches.index(batch) + 1
for later_batch in self._batches[index_of_next:]:
if self._is_valid_batch(later_batch):
return False
return True
def _get_contexts_for_squash(self, batch_signature):
"""Starting with the batch referenced by batch_signature, iterate back
through the batches and for each valid batch collect the context_id.
At the end remove contexts for txns that are other txn's predecessors.
Args:
batch_signature (str): The batch to start from, moving back through
the batches in the scheduler
Returns:
(list): Context ids that haven't been previous base contexts.
"""
batch = self._batches_by_id[batch_signature].batch
index = self._batches.index(batch)
contexts = []
txns_added_predecessors = []
for b in self._batches[index::-1]:
batch_is_valid = True
contexts_from_batch = []
for txn in b.transactions[::-1]:
result = self._txn_results[txn.header_signature]
if not result.is_valid:
batch_is_valid = False
break
else:
txn_id = txn.header_signature
if txn_id not in txns_added_predecessors:
txns_added_predecessors.append(
self._txn_predecessors[txn_id])
contexts_from_batch.append(result.context_id)
if batch_is_valid:
contexts.extend(contexts_from_batch)
return contexts
def _is_state_hash_correct(self, state_hash, batch_id):
return state_hash == self._batches_with_state_hash[batch_id]
def get_batch_execution_result(self, batch_signature):
with self._condition:
# This method calculates the BatchExecutionResult on the fly,
# where only the TxnExecutionResults are cached, instead
# of BatchExecutionResults, as in the SerialScheduler
if batch_signature not in self._batches_by_id:
return None
batch = self._batches_by_id[batch_signature].batch
if not self._is_valid_batch(batch):
return BatchExecutionResult(is_valid=False, state_hash=None)
state_hash = None
try:
if self._is_explicit_request_for_state_root(batch_signature):
contexts = self._get_contexts_for_squash(batch_signature)
state_hash = self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=False)
if self._is_state_hash_correct(state_hash,
batch_signature):
self._squash(
self._first_state_hash,
contexts,
persist=True,
clean_up=True)
else:
self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=True)
elif self._is_implicit_request_for_state_root(batch_signature):
contexts = self._get_contexts_for_squash(batch_signature)
state_hash = self._squash(
self._first_state_hash,
contexts,
persist=self._always_persist,
clean_up=True)
except _UnscheduledTransactionError:
return None
return BatchExecutionResult(is_valid=True, state_hash=state_hash)
def get_transaction_execution_results(self, batch_signature):
with self._condition:
annotated_batch = self._batches_by_id.get(batch_signature)
if annotated_batch is None:
return None
results = []
for txn in annotated_batch.batch.transactions:
result = self._txn_results.get(txn.header_signature)
if result is not None:
results.append(result)
return results
def _is_predecessor_of_possible_successor(self,
txn_id,
possible_successor):
return self._predecessor_chain.is_predecessor_of_other(
txn_id,
[possible_successor])
def _txn_has_result(self, txn_id):
return txn_id in self._txn_results
def _is_in_same_batch(self, txn_id_1, txn_id_2):
return self._batches_by_txn_id[txn_id_1] == \
self._batches_by_txn_id[txn_id_2]
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):
"""Decide if possible_successor should be replayed.
Args:
txn_id (str): Id of txn in failed batch.
possible_successor (str): Id of txn to possibly replay.
already_seen (list): A list of possible_successors that have
been replayed.
Returns:
(bool): If the possible_successor should be replayed.
"""
is_successor = self._is_predecessor_of_possible_successor(
txn_id,
possible_successor)
in_different_batch = not self._is_in_same_batch(txn_id,
possible_successor)
has_not_been_seen = possible_successor not in already_seen
return is_successor and in_different_batch and has_not_been_seen
def _remove_subsequent_result_because_of_batch_failure(self, sig):
"""Remove transactions from scheduled and txn_results for
successors of txns in a failed batch. These transactions will now,
or in the future be rescheduled in next_transaction; giving a
replay ability.
Args:
sig (str): Transaction header signature
"""
batch = self._batches_by_txn_id[sig]
seen = []
for txn in batch.transactions:
txn_id = txn.header_signature
for poss_successor in self._scheduled.copy():
if not self.is_transaction_in_schedule(poss_successor):
continue
if self._is_txn_to_replay(txn_id, poss_successor, seen):
if self._txn_has_result(poss_successor):
del self._txn_results[poss_successor]
self._scheduled.remove(poss_successor)
self._txns_available[poss_successor] = \
self._transactions[poss_successor]
else:
self._outstanding.add(poss_successor)
seen.append(poss_successor)
def _reschedule_if_outstanding(self, txn_signature):
if txn_signature in self._outstanding:
self._txns_available[txn_signature] = \
self._transactions[txn_signature]
self._scheduled.remove(txn_signature)
self._outstanding.discard(txn_signature)
return True
return False
def _index_of_batch(self, batch):
batch_index = None
try:
batch_index = self._batches.index(batch)
except ValueError:
pass
return batch_index
def _set_least_batch_id(self, txn_signature):
"""Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
"""
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
# Test to see if all batches from the least_batch to
# the prior batch to the current batch have results.
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
# Find the first batch from the current batch on, that doesn't have
# all results.
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least
def set_transaction_execution_result(
self, txn_signature, is_valid, context_id, state_changes=None,
events=None, data=None, error_message="", error_data=b""):
with self._condition:
if txn_signature not in self._scheduled:
raise SchedulerError(
"transaction not scheduled: {}".format(txn_signature))
if txn_signature not in self._batches_by_txn_id:
return
self._set_least_batch_id(txn_signature=txn_signature)
if not is_valid:
self._remove_subsequent_result_because_of_batch_failure(
txn_signature)
is_rescheduled = self._reschedule_if_outstanding(txn_signature)
if not is_rescheduled:
self._txn_results[txn_signature] = TxnExecutionResult(
signature=txn_signature,
is_valid=is_valid,
context_id=context_id if is_valid else None,
state_hash=self._first_state_hash if is_valid else None,
state_changes=state_changes,
events=events,
data=data,
error_message=error_message,
error_data=error_data)
self._condition.notify_all()
def _has_predecessors(self, txn_id):
for predecessor_id in self._txn_predecessors[txn_id]:
if predecessor_id not in self._txn_results:
return True
# Since get_initial_state_for_transaction gets context ids not
# just from predecessors but also in the case of an enclosing
# writer failing, predecessors of that predecessor, this extra
# check is needed.
for pre_pred_id in self._txn_predecessors[predecessor_id]:
if pre_pred_id not in self._txn_results:
return True
return False
def _is_outstanding(self, txn_id):
return txn_id in self._outstanding
def _txn_is_in_valid_batch(self, txn_id):
"""Returns whether the transaction is in a valid batch.
Args:
txn_id (str): The transaction header signature.
Returns:
(bool): True if the txn's batch is valid, False otherwise.
"""
batch = self._batches_by_txn_id[txn_id]
# Return whether every transaction in the batch with a
# transaction result is valid
return all(
self._txn_results[sig].is_valid
for sig in set(self._txn_results).intersection(
(txn.header_signature for txn in batch.transactions)))
def _get_initial_state_for_transaction(self, txn):
# Collect contexts that this transaction depends upon
# We assume that all prior txns in the batch are valid
# or else this transaction wouldn't run. We assume that
# the mechanism in next_transaction makes sure that each
# predecessor txn has a result. Also any explicit
# dependencies that could have failed this txn did so.
contexts = []
txn_dependencies = deque()
txn_dependencies.extend(self._txn_predecessors[txn.header_signature])
while txn_dependencies:
prior_txn_id = txn_dependencies.popleft()
if self._txn_is_in_valid_batch(prior_txn_id):
result = self._txn_results[prior_txn_id]
if (prior_txn_id, result.context_id) not in contexts:
contexts.append((prior_txn_id, result.context_id))
else:
txn_dependencies.extend(self._txn_predecessors[prior_txn_id])
contexts.sort(
key=lambda x: self._index_of_txn_in_schedule(x[0]),
reverse=True)
return [c_id for _, c_id in contexts]
def _index_of_txn_in_schedule(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
index_of_batch_in_schedule = self._batches.index(batch)
number_of_txns_in_prior_batches = 0
for prior in self._batches[:index_of_batch_in_schedule]:
number_of_txns_in_prior_batches += len(prior.transactions)
txn_index, _ = next(
(i, t)
for i, t in enumerate(batch.transactions)
if t.header_signature == txn_id)
return number_of_txns_in_prior_batches + txn_index - 1
def _can_fail_fast(self, txn_id):
batch_id = self._batches_by_txn_id[txn_id].header_signature
return batch_id == self._least_batch_id_wo_results
def next_transaction(self):
with self._condition:
# We return the next transaction which hasn't been scheduled and
# is not blocked by a dependency.
next_txn = None
no_longer_available = []
for txn_id, txn in self._txns_available.items():
if (self._has_predecessors(txn_id)
or self._is_outstanding(txn_id)):
continue
header = TransactionHeader()
header.ParseFromString(txn.header)
deps = tuple(header.dependencies)
if self._dependency_not_processed(deps):
continue
if self._txn_failed_by_dep(deps):
no_longer_available.append(txn_id)
self._txn_results[txn_id] = \
TxnExecutionResult(
signature=txn_id,
is_valid=False,
context_id=None,
state_hash=None)
continue
if not self._txn_is_in_valid_batch(txn_id) and \
self._can_fail_fast(txn_id):
self._txn_results[txn_id] = \
TxnExecutionResult(False, None, None)
no_longer_available.append(txn_id)
continue
next_txn = txn
break
for txn_id in no_longer_available:
del self._txns_available[txn_id]
if next_txn is not None:
bases = self._get_initial_state_for_transaction(next_txn)
info = TxnInformation(
txn=next_txn,
state_hash=self._first_state_hash,
base_context_ids=bases)
self._scheduled.append(next_txn.header_signature)
del self._txns_available[next_txn.header_signature]
self._scheduled_txn_info[next_txn.header_signature] = info
return info
return None
def _dependency_not_processed(self, deps):
if any(not self._all_in_batch_have_results(d)
for d in deps
if d in self._batches_by_txn_id):
return True
return False
def _txn_failed_by_dep(self, deps):
if any(self._any_in_batch_are_invalid(d)
for d in deps
if d in self._batches_by_txn_id):
return True
return False
def _all_in_batch_have_results(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
return all(
t.header_signature in self._txn_results
for t in list(batch.transactions))
def _any_in_batch_are_invalid(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
return any(not self._txn_results[t.header_signature].is_valid
for t in list(batch.transactions))
def available(self):
with self._condition:
# We return the next transaction which hasn't been scheduled and
# is not blocked by a dependency.
count = 0
for txn_id in self._txns_available:
if not self._has_predecessors(txn_id):
count += 1
return count
def unschedule_incomplete_batches(self):
incomplete_batches = set()
with self._condition:
# These transactions have never been scheduled.
for txn_id, txn in self._txns_available.items():
batch = self._batches_by_txn_id[txn_id]
batch_id = batch.header_signature
annotated_batch = self._batches_by_id[batch_id]
if not annotated_batch.preserve:
incomplete_batches.add(batch_id)
# These transactions were in flight.
in_flight = set(self._transactions.keys()).difference(
self._txn_results.keys())
for txn_id in in_flight:
batch = self._batches_by_txn_id[txn_id]
batch_id = batch.header_signature
annotated_batch = self._batches_by_id[batch_id]
if not annotated_batch.preserve:
incomplete_batches.add(batch_id)
# clean up the batches, including partial complete information
for batch_id in incomplete_batches:
annotated_batch = self._batches_by_id[batch_id]
self._batches.remove(annotated_batch.batch)
del self._batches_by_id[batch_id]
for txn in annotated_batch.batch.transactions:
txn_id = txn.header_signature
del self._batches_by_txn_id[txn_id]
if txn_id in self._txn_results:
del self._txn_results[txn_id]
if txn_id in self._txns_available:
del self._txns_available[txn_id]
if txn_id in self._outstanding:
self._outstanding.remove(txn_id)
self._condition.notify_all()
if incomplete_batches:
LOGGER.debug('Removed %s incomplete batches from the schedule',
len(incomplete_batches))
def is_transaction_in_schedule(self, txn_signature):
with self._condition:
return txn_signature in self._batches_by_txn_id
def finalize(self):
with self._condition:
self._final = True
self._condition.notify_all()
def _complete(self):
return self._final and \
len(self._txn_results) == len(self._batches_by_txn_id)
def complete(self, block=True):
with self._condition:
if self._complete():
return True
if block:
return self._condition.wait_for(self._complete)
return False
def __del__(self):
self.cancel()
def __iter__(self):
return SchedulerIterator(self, self._condition)
def count(self):
with self._condition:
return len(self._scheduled)
def get_transaction(self, index):
with self._condition:
return self._scheduled_txn_info[self._scheduled[index]]
def cancel(self):
with self._condition:
if not self._cancelled and not self._final:
contexts = [
tr.context_id for tr in self._txn_results.values()
if tr.context_id
]
self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=True)
self._cancelled = True
self._condition.notify_all()
def is_cancelled(self):
with self._condition:
return self._cancelled
def _first(iterator):
try:
return next(iterator)
except StopIteration:
return None
class _UnscheduledTransactionError(Exception):
"""Thrown when information on a transaction is requested, but the
transaction has been unscheduled.
"""
pass
| <filename>validator/sawtooth_validator/execution/scheduler_parallel.py
# Copyright 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from itertools import filterfalse
from threading import Condition
import logging
from collections import deque
from collections import namedtuple
from collections import OrderedDict
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_validator.execution.scheduler import BatchExecutionResult
from sawtooth_validator.execution.scheduler import TxnExecutionResult
from sawtooth_validator.execution.scheduler import TxnInformation
from sawtooth_validator.execution.scheduler import Scheduler
from sawtooth_validator.execution.scheduler import SchedulerIterator
from sawtooth_validator.execution.scheduler_exceptions import SchedulerError
LOGGER = logging.getLogger(__name__)
_AnnotatedBatch = namedtuple('ScheduledBatch',
['batch', 'required', 'preserve'])
class AddressNotInTree(Exception):
def __init__(self, match=None):
super().__init__()
self.match = match
class Node:
def __init__(self, address, data=None):
self.address = address
self.children = set()
self.data = data
class Tree:
'''
This tree is a prefix tree: a node's address is always a strict
prefix of the addresses of its children, and every node either has
data or has multiple children.
'''
def __init__(self):
self._root = Node('')
def _get_child(self, node, address):
for child in node.children:
if address.startswith(child.address):
return child
match = None
for child in node.children:
if child.address.startswith(address):
match = child.address
raise AddressNotInTree(match=match)
def _walk_to_address(self, address):
node = self._root
yield node
# A node's address is always a proper prefix of the addresses
# of its children, so we only need to check the ordering. A
# more explicit but also more verbose and probably slower
# check would be:
#
# while address != node.address and address.startswith(node.address):
#
while node.address < address:
node = self._get_child(node, address)
yield node
def update(self, address, updater, prune=False):
'''
Walk to ADDRESS, creating nodes if necessary, and set the data
there to UPDATER(data).
Arguments:
address (str): the address to be updated
'''
node = self._get_or_create(address)
node.data = updater(node.data)
if prune:
node.children.clear()
def prune(self, address):
'''
Remove all children (and descendants) below ADDRESS.
Arguments:
address (str): the address to be pruned
'''
try:
for step in self._walk_to_address(address):
node = step
except AddressNotInTree:
return
node.children.clear()
def walk(self, address):
'''
Returns a stream of pairs of node addresses and data, raising
AddressNotInTree if ADDRESS is not in the tree.
First the ancestors of ADDRESS (including itself) are yielded,
earliest to latest, and then the descendants of ADDRESS are
yielded in an unspecified order.
Arguments:
address (str): the address to be walked
'''
for step in self._walk_to_address(address):
node = step
yield node.address, node.data
to_process = deque()
to_process.extendleft(
node.children)
while to_process:
node = to_process.pop()
yield node.address, node.data
if node.children:
to_process.extendleft(
node.children)
def _get_or_create(self, address):
# Walk as far down the tree as possible. If the desired
# address is reached, return that node. Otherwise, add a new
# one.
try:
for step in self._walk_to_address(address):
node = step
return node
except AddressNotInTree:
# The rest of the function deals with adding a new node,
# but there's no sense adding a level of indentation, so
# just pass here.
pass
# The address isn't in the tree, so a new node will be added
# one way or another.
new_node = Node(address)
# Try to get the next child with a matching prefix.
try:
prefix_len = len(node.address)
match = next(
child
for child in node.children
if child.address[prefix_len:].startswith(
address[prefix_len:][0])
)
# There's no match, so just add the new address as a child.
except StopIteration:
node.children.add(new_node)
return new_node
# If node address is 'rustic' and the address being added is
# 'rust', then 'rust' will be the intermediate node taking
# 'rustic' as a child.
if match.address.startswith(address):
node.children.add(new_node)
new_node.children.add(match)
node.children.remove(match)
return new_node
# The address and the match address share a common prefix, so
# an intermediate node with the prefix as its address will
# take them both as children.
shorter = (
address
if len(address) < len(match.address)
else match.address
)
for i in range(1, len(shorter)):
if address[i] != match.address[i]:
prefix = shorter[:i]
break
intermediate_node = Node(prefix)
intermediate_node.children.update((new_node, match))
node.children.add(intermediate_node)
node.children.remove(match)
return new_node
class Predecessors:
def __init__(self, readers, writer):
self.readers = readers
self.writer = writer
class PredecessorTree:
def __init__(self):
self._tree = Tree()
def add_reader(self, address, reader):
def updater(data):
if data is None:
return Predecessors(readers={reader}, writer=None)
data.readers.add(reader)
return data
self._tree.update(address, updater)
def set_writer(self, address, writer):
def updater(data):
if data is None:
return Predecessors(readers=set(), writer=writer)
data.writer = writer
data.readers.clear()
return data
self._tree.update(address, updater, prune=True)
def find_write_predecessors(self, address):
"""Returns all predecessor transaction ids for a write of the provided
address.
Arguments:
address (str): the radix address
Returns: a set of transaction ids
"""
# A write operation must be preceded by:
# - The "enclosing writer", which is the writer at the address or
# the nearest writer higher (closer to the root) in the tree.
# - The "enclosing readers", which are the readers at the address
# or higher in the tree.
# - The "children writers", which include all writers which are
# lower in the tree than the address.
# - The "children readers", which include all readers which are
# lower in the tree than the address.
#
# The enclosing writer must be added as it may have modified a node
# which must not happen after the current write.
#
# Writers which are higher in the tree than the enclosing writer may
# have modified a node at or under the given address. However, we do
# not need to include them here as they will have been considered a
# predecessor to the enclosing writer.
#
# Enclosing readers must be included. Technically, we only need to add
# enclosing readers which occurred after the enclosing writer, since
# the readers preceding the writer will have been considered a
# predecessor of the enclosing writer. However, with the current
# data structure we can not determine the difference between readers
# so we specify them all; this is mostly harmless as it will not change
# the eventual sort order generated by the scheduler.
#
# Children readers must be added, since their reads must happen prior
# to the write.
predecessors = set()
enclosing_writer = None
node_stream = self._tree.walk(address)
address_len = len(address)
# First, walk down from the root to the address, collecting all readers
# and updating the enclosing_writer if needed.
try:
for node_address, node in node_stream:
if node is not None:
predecessors.update(node.readers)
if node.writer is not None:
enclosing_writer = node.writer
if len(node_address) >= address_len:
break
# If the address isn't on the tree, then there aren't any
# predecessors below the node to worry about (because there
# isn't anything at all), so return the predecessors that have
# already been collected.
except AddressNotInTree as err:
if err.match is not None:
return self.find_write_predecessors(err.match)
return predecessors
finally:
if enclosing_writer is not None:
predecessors.add(enclosing_writer)
# Next, descend down the tree starting at the address node and
# find all descendant readers and writers.
for _, node in node_stream:
if node is not None:
if node.writer is not None:
predecessors.add(node.writer)
predecessors.update(node.readers)
return predecessors
def find_read_predecessors(self, address):
"""Returns all predecessor transaction ids for a read of the provided
address.
Arguments:
address (str): the radix address
Returns: a set of transaction ids
"""
# A read operation must be preceded by:
# - The "enclosing writer", which is the writer at the address or
# the nearest writer higher (closer to the root) in the tree.
# - All "children writers", which include all writers which are
# lower in the tree than the address.
#
# The enclosing writer must be added as it is possible it updated the
# contents stored at address.
#
# Writers which are higher in the tree than the enclosing writer may
# have modified the address. However, we do not need to include them
# here as they will have been considered a predecessor to the enclosing
# writer.
#
# Children writers must be included as they may have updated addresses
# lower in the tree, and these writers will have always been preceded
# by the enclosing writer.
#
# We do not need to add any readers, since a reader cannot impact the
# value which we are reading. The relationship is transitive, in that
# this reader will also not impact the readers already recorded in the
# tree.
predecessors = set()
enclosing_writer = None
node_stream = self._tree.walk(address)
address_len = len(address)
# First, walk down from the root to the address, updating the
# enclosing_writer if needed.
try:
for node_address, node in node_stream:
if node is not None:
if node.writer is not None:
enclosing_writer = node.writer
if len(node_address) >= address_len:
break
# If the address isn't on the tree, then there aren't any
# predecessors below the node to worry about (because there
# isn't anything at all), so return the predecessors that have
# already been collected.
except AddressNotInTree as err:
if err.match is not None:
return self.find_read_predecessors(err.match)
return predecessors
finally:
if enclosing_writer is not None:
predecessors.add(enclosing_writer)
# Next, descend down the tree starting at the address node and
# find all descendant writers.
for _, node in node_stream:
if node is not None:
if node.writer is not None:
predecessors.add(node.writer)
return predecessors
class PredecessorChain:
def __init__(self):
self._predecessors_by_id = dict()
def add_relationship(self, txn_id, predecessors):
"""Add a predecessor-successor relationship between one txn id and
a set of predecessors.
Args:
txn_id (str): The transaction id of the transaction.
predecessors (set): The transaction ids of the
transaction's predecessors
Returns:
None
"""
all_pred = set(predecessors)
for pred in predecessors:
all_pred.update(self._predecessors_by_id[pred])
self._predecessors_by_id[txn_id] = all_pred
def is_predecessor_of_other(self, predecessor, others):
"""Returns whether the predecessor is a predecessor or a predecessor
of a predecessor...of any of the others.
Args:
predecessor (str): The txn id of the predecessor.
others (list(str)): The txn id of the successor.
Returns:
(bool)
"""
return any(predecessor in self._predecessors_by_id[o] for o in others)
class ParallelScheduler(Scheduler):
def __init__(self, squash_handler, first_state_hash, always_persist):
self._squash = squash_handler
self._first_state_hash = first_state_hash
self._last_state_hash = first_state_hash
self._condition = Condition()
self._predecessor_tree = PredecessorTree()
self._txn_predecessors = {}
self._always_persist = always_persist
self._predecessor_chain = PredecessorChain()
# Transaction identifiers which have been scheduled. Stored as a list,
# since order is important; SchedulerIterator instances, for example,
# must all return scheduled transactions in the same order.
self._scheduled = []
# Transactions that must be replayed but the prior result hasn't
# been returned yet.
self._outstanding = set()
# Batch id for the batch with the property that the batch doesn't have
# all txn results, and all batches prior to it have all their txn
# results.
self._least_batch_id_wo_results = None
# A dict of transaction id to TxnInformation objects, containing all
# transactions present in self._scheduled.
self._scheduled_txn_info = {}
# All batches in their natural order (the order they were added to
# the scheduler.
self._batches = []
# The batches that have state hashes added in add_batch, used in
# Block validation.
self._batches_with_state_hash = {}
# Indexes to find a batch quickly
self._batches_by_id = {}
self._batches_by_txn_id = {}
# Transaction results
self._txn_results = {}
self._txns_available = OrderedDict()
self._transactions = {}
self._cancelled = False
self._final = False
def _find_input_dependencies(self, inputs):
"""Use the predecessor tree to find dependencies based on inputs.
Returns: A list of transaction ids.
"""
dependencies = []
for address in inputs:
dependencies.extend(
self._predecessor_tree.find_read_predecessors(address))
return dependencies
def _find_output_dependencies(self, outputs):
"""Use the predecessor tree to find dependencies based on outputs.
Returns: A list of transaction ids.
"""
dependencies = []
for address in outputs:
dependencies.extend(
self._predecessor_tree.find_write_predecessors(address))
return dependencies
def add_batch(self, batch, state_hash=None, required=False):
with self._condition:
if self._final:
raise SchedulerError('Invalid attempt to add batch to '
'finalized scheduler; batch: {}'
.format(batch.header_signature))
if not self._batches:
self._least_batch_id_wo_results = batch.header_signature
preserve = required
if not required:
# If this is the first non-required batch, it is preserved for
# the schedule to be completed (i.e. no empty schedules in the
# event of unschedule_incomplete_batches being called before
# the first batch is completed).
preserve = _first(
filterfalse(lambda sb: sb.required,
self._batches_by_id.values())) is None
self._batches.append(batch)
self._batches_by_id[batch.header_signature] = \
_AnnotatedBatch(batch, required=required, preserve=preserve)
for txn in batch.transactions:
self._batches_by_txn_id[txn.header_signature] = batch
self._txns_available[txn.header_signature] = txn
self._transactions[txn.header_signature] = txn
if state_hash is not None:
b_id = batch.header_signature
self._batches_with_state_hash[b_id] = state_hash
# For dependency handling: First, we determine our dependencies
# based on the current state of the predecessor tree. Second,
# we update the predecessor tree with reader and writer
# information based on input and outputs.
for txn in batch.transactions:
header = TransactionHeader()
header.ParseFromString(txn.header)
# Calculate predecessors (transaction ids which must come
# prior to the current transaction).
predecessors = self._find_input_dependencies(header.inputs)
predecessors.extend(
self._find_output_dependencies(header.outputs))
txn_id = txn.header_signature
# Update our internal state with the computed predecessors.
self._txn_predecessors[txn_id] = set(predecessors)
self._predecessor_chain.add_relationship(
txn_id=txn_id,
predecessors=predecessors)
# Update the predecessor tree.
#
# Order of reader/writer operations is relevant. A writer
# may overshadow a reader. For example, if the transaction
# has the same input/output address, the end result will be
# this writer (txn.header_signature) stored at the address of
# the predecessor tree. The reader information will have been
# discarded. Write operations to partial addresses will also
# overshadow entire parts of the predecessor tree.
#
# Thus, the order here (inputs then outputs) will cause the
# minimal amount of relevant information to be stored in the
# predecessor tree, with duplicate information being
# automatically discarded by the set_writer() call.
for address in header.inputs:
self._predecessor_tree.add_reader(
address, txn_id)
for address in header.outputs:
self._predecessor_tree.set_writer(
address, txn_id)
self._condition.notify_all()
def _is_explicit_request_for_state_root(self, batch_signature):
return batch_signature in self._batches_with_state_hash
def _is_implicit_request_for_state_root(self, batch_signature):
return self._final and self._is_last_valid_batch(batch_signature)
def _is_valid_batch(self, batch):
for txn in batch.transactions:
if txn.header_signature not in self._txn_results:
raise _UnscheduledTransactionError()
result = self._txn_results[txn.header_signature]
if not result.is_valid:
return False
return True
def _is_last_valid_batch(self, batch_signature):
batch = self._batches_by_id[batch_signature].batch
if not self._is_valid_batch(batch):
return False
index_of_next = self._batches.index(batch) + 1
for later_batch in self._batches[index_of_next:]:
if self._is_valid_batch(later_batch):
return False
return True
def _get_contexts_for_squash(self, batch_signature):
"""Starting with the batch referenced by batch_signature, iterate back
through the batches and for each valid batch collect the context_id.
At the end remove contexts for txns that are other txn's predecessors.
Args:
batch_signature (str): The batch to start from, moving back through
the batches in the scheduler
Returns:
(list): Context ids that haven't been previous base contexts.
"""
batch = self._batches_by_id[batch_signature].batch
index = self._batches.index(batch)
contexts = []
txns_added_predecessors = []
for b in self._batches[index::-1]:
batch_is_valid = True
contexts_from_batch = []
for txn in b.transactions[::-1]:
result = self._txn_results[txn.header_signature]
if not result.is_valid:
batch_is_valid = False
break
else:
txn_id = txn.header_signature
if txn_id not in txns_added_predecessors:
txns_added_predecessors.append(
self._txn_predecessors[txn_id])
contexts_from_batch.append(result.context_id)
if batch_is_valid:
contexts.extend(contexts_from_batch)
return contexts
def _is_state_hash_correct(self, state_hash, batch_id):
return state_hash == self._batches_with_state_hash[batch_id]
def get_batch_execution_result(self, batch_signature):
with self._condition:
# This method calculates the BatchExecutionResult on the fly,
# where only the TxnExecutionResults are cached, instead
# of BatchExecutionResults, as in the SerialScheduler
if batch_signature not in self._batches_by_id:
return None
batch = self._batches_by_id[batch_signature].batch
if not self._is_valid_batch(batch):
return BatchExecutionResult(is_valid=False, state_hash=None)
state_hash = None
try:
if self._is_explicit_request_for_state_root(batch_signature):
contexts = self._get_contexts_for_squash(batch_signature)
state_hash = self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=False)
if self._is_state_hash_correct(state_hash,
batch_signature):
self._squash(
self._first_state_hash,
contexts,
persist=True,
clean_up=True)
else:
self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=True)
elif self._is_implicit_request_for_state_root(batch_signature):
contexts = self._get_contexts_for_squash(batch_signature)
state_hash = self._squash(
self._first_state_hash,
contexts,
persist=self._always_persist,
clean_up=True)
except _UnscheduledTransactionError:
return None
return BatchExecutionResult(is_valid=True, state_hash=state_hash)
def get_transaction_execution_results(self, batch_signature):
with self._condition:
annotated_batch = self._batches_by_id.get(batch_signature)
if annotated_batch is None:
return None
results = []
for txn in annotated_batch.batch.transactions:
result = self._txn_results.get(txn.header_signature)
if result is not None:
results.append(result)
return results
def _is_predecessor_of_possible_successor(self,
txn_id,
possible_successor):
return self._predecessor_chain.is_predecessor_of_other(
txn_id,
[possible_successor])
def _txn_has_result(self, txn_id):
return txn_id in self._txn_results
def _is_in_same_batch(self, txn_id_1, txn_id_2):
return self._batches_by_txn_id[txn_id_1] == \
self._batches_by_txn_id[txn_id_2]
def _is_txn_to_replay(self, txn_id, possible_successor, already_seen):
"""Decide if possible_successor should be replayed.
Args:
txn_id (str): Id of txn in failed batch.
possible_successor (str): Id of txn to possibly replay.
already_seen (list): A list of possible_successors that have
been replayed.
Returns:
(bool): If the possible_successor should be replayed.
"""
is_successor = self._is_predecessor_of_possible_successor(
txn_id,
possible_successor)
in_different_batch = not self._is_in_same_batch(txn_id,
possible_successor)
has_not_been_seen = possible_successor not in already_seen
return is_successor and in_different_batch and has_not_been_seen
def _remove_subsequent_result_because_of_batch_failure(self, sig):
"""Remove transactions from scheduled and txn_results for
successors of txns in a failed batch. These transactions will now,
or in the future be rescheduled in next_transaction; giving a
replay ability.
Args:
sig (str): Transaction header signature
"""
batch = self._batches_by_txn_id[sig]
seen = []
for txn in batch.transactions:
txn_id = txn.header_signature
for poss_successor in self._scheduled.copy():
if not self.is_transaction_in_schedule(poss_successor):
continue
if self._is_txn_to_replay(txn_id, poss_successor, seen):
if self._txn_has_result(poss_successor):
del self._txn_results[poss_successor]
self._scheduled.remove(poss_successor)
self._txns_available[poss_successor] = \
self._transactions[poss_successor]
else:
self._outstanding.add(poss_successor)
seen.append(poss_successor)
def _reschedule_if_outstanding(self, txn_signature):
if txn_signature in self._outstanding:
self._txns_available[txn_signature] = \
self._transactions[txn_signature]
self._scheduled.remove(txn_signature)
self._outstanding.discard(txn_signature)
return True
return False
def _index_of_batch(self, batch):
batch_index = None
try:
batch_index = self._batches.index(batch)
except ValueError:
pass
return batch_index
def _set_least_batch_id(self, txn_signature):
"""Set the first batch id that doesn't have all results.
Args:
txn_signature (str): The txn identifier of the transaction with
results being set.
"""
batch = self._batches_by_txn_id[txn_signature]
least_index = self._index_of_batch(
self._batches_by_id[self._least_batch_id_wo_results].batch)
current_index = self._index_of_batch(batch)
all_prior = False
if current_index <= least_index:
return
# Test to see if all batches from the least_batch to
# the prior batch to the current batch have results.
if all(
all(t.header_signature in self._txn_results
for t in b.transactions)
for b in self._batches[least_index:current_index]):
all_prior = True
if not all_prior:
return
possible_least = self._batches[current_index].header_signature
# Find the first batch from the current batch on, that doesn't have
# all results.
for b in self._batches[current_index:]:
if not all(t.header_signature in self._txn_results
for t in b.transactions):
possible_least = b.header_signature
break
self._least_batch_id_wo_results = possible_least
def set_transaction_execution_result(
self, txn_signature, is_valid, context_id, state_changes=None,
events=None, data=None, error_message="", error_data=b""):
with self._condition:
if txn_signature not in self._scheduled:
raise SchedulerError(
"transaction not scheduled: {}".format(txn_signature))
if txn_signature not in self._batches_by_txn_id:
return
self._set_least_batch_id(txn_signature=txn_signature)
if not is_valid:
self._remove_subsequent_result_because_of_batch_failure(
txn_signature)
is_rescheduled = self._reschedule_if_outstanding(txn_signature)
if not is_rescheduled:
self._txn_results[txn_signature] = TxnExecutionResult(
signature=txn_signature,
is_valid=is_valid,
context_id=context_id if is_valid else None,
state_hash=self._first_state_hash if is_valid else None,
state_changes=state_changes,
events=events,
data=data,
error_message=error_message,
error_data=error_data)
self._condition.notify_all()
def _has_predecessors(self, txn_id):
for predecessor_id in self._txn_predecessors[txn_id]:
if predecessor_id not in self._txn_results:
return True
            # This extra check is needed because
            # get_initial_state_for_transaction gathers context ids not only
            # from direct predecessors but also, when an enclosing writer
            # fails, from the predecessors of that predecessor.
for pre_pred_id in self._txn_predecessors[predecessor_id]:
if pre_pred_id not in self._txn_results:
return True
return False
def _is_outstanding(self, txn_id):
return txn_id in self._outstanding
def _txn_is_in_valid_batch(self, txn_id):
"""Returns whether the transaction is in a valid batch.
Args:
txn_id (str): The transaction header signature.
Returns:
(bool): True if the txn's batch is valid, False otherwise.
"""
batch = self._batches_by_txn_id[txn_id]
# Return whether every transaction in the batch with a
# transaction result is valid
return all(
self._txn_results[sig].is_valid
for sig in set(self._txn_results).intersection(
(txn.header_signature for txn in batch.transactions)))
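    # Illustrative reading (hypothetical ids): for a batch [T1, T2] where only
    # T1 has a result so far, only T1's result is inspected, so the batch still
    # counts as valid unless some transaction that already has a result is
    # invalid.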
def _get_initial_state_for_transaction(self, txn):
# Collect contexts that this transaction depends upon
# We assume that all prior txns in the batch are valid
# or else this transaction wouldn't run. We assume that
# the mechanism in next_transaction makes sure that each
        # predecessor txn has a result, and that any explicit dependency
        # failure that would invalidate this txn has already been handled
        # in next_transaction (such txns are failed there and never handed
        # out).
contexts = []
txn_dependencies = deque()
txn_dependencies.extend(self._txn_predecessors[txn.header_signature])
while txn_dependencies:
prior_txn_id = txn_dependencies.popleft()
if self._txn_is_in_valid_batch(prior_txn_id):
result = self._txn_results[prior_txn_id]
if (prior_txn_id, result.context_id) not in contexts:
contexts.append((prior_txn_id, result.context_id))
else:
txn_dependencies.extend(self._txn_predecessors[prior_txn_id])
contexts.sort(
key=lambda x: self._index_of_txn_in_schedule(x[0]),
reverse=True)
return [c_id for _, c_id in contexts]
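    # Illustrative reading (hypothetical ids): if txn T3 has predecessors T2
    # (whose batch is valid) and T1 (whose batch failed), the loop above keeps
    # T2's context id and, instead of using T1's, walks further back through
    # T1's own predecessors, so the returned base contexts only come from txns
    # in valid batches.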
def _index_of_txn_in_schedule(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
index_of_batch_in_schedule = self._batches.index(batch)
number_of_txns_in_prior_batches = 0
for prior in self._batches[:index_of_batch_in_schedule]:
number_of_txns_in_prior_batches += len(prior.transactions)
txn_index, _ = next(
(i, t)
for i, t in enumerate(batch.transactions)
if t.header_signature == txn_id)
return number_of_txns_in_prior_batches + txn_index - 1
def _can_fail_fast(self, txn_id):
batch_id = self._batches_by_txn_id[txn_id].header_signature
return batch_id == self._least_batch_id_wo_results
def next_transaction(self):
with self._condition:
# We return the next transaction which hasn't been scheduled and
# is not blocked by a dependency.
next_txn = None
no_longer_available = []
for txn_id, txn in self._txns_available.items():
if (self._has_predecessors(txn_id)
or self._is_outstanding(txn_id)):
continue
header = TransactionHeader()
header.ParseFromString(txn.header)
deps = tuple(header.dependencies)
if self._dependency_not_processed(deps):
continue
if self._txn_failed_by_dep(deps):
no_longer_available.append(txn_id)
self._txn_results[txn_id] = \
TxnExecutionResult(
signature=txn_id,
is_valid=False,
context_id=None,
state_hash=None)
continue
if not self._txn_is_in_valid_batch(txn_id) and \
self._can_fail_fast(txn_id):
self._txn_results[txn_id] = \
TxnExecutionResult(False, None, None)
no_longer_available.append(txn_id)
continue
next_txn = txn
break
for txn_id in no_longer_available:
del self._txns_available[txn_id]
if next_txn is not None:
bases = self._get_initial_state_for_transaction(next_txn)
info = TxnInformation(
txn=next_txn,
state_hash=self._first_state_hash,
base_context_ids=bases)
self._scheduled.append(next_txn.header_signature)
del self._txns_available[next_txn.header_signature]
self._scheduled_txn_info[next_txn.header_signature] = info
return info
return None
def _dependency_not_processed(self, deps):
if any(not self._all_in_batch_have_results(d)
for d in deps
if d in self._batches_by_txn_id):
return True
return False
def _txn_failed_by_dep(self, deps):
if any(self._any_in_batch_are_invalid(d)
for d in deps
if d in self._batches_by_txn_id):
return True
return False
def _all_in_batch_have_results(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
return all(
t.header_signature in self._txn_results
for t in list(batch.transactions))
def _any_in_batch_are_invalid(self, txn_id):
batch = self._batches_by_txn_id[txn_id]
return any(not self._txn_results[t.header_signature].is_valid
for t in list(batch.transactions))
def available(self):
with self._condition:
            # Count the transactions that have not yet been scheduled and
            # are not blocked by an unfinished predecessor.
count = 0
for txn_id in self._txns_available:
if not self._has_predecessors(txn_id):
count += 1
return count
def unschedule_incomplete_batches(self):
incomplete_batches = set()
with self._condition:
# These transactions have never been scheduled.
for txn_id, txn in self._txns_available.items():
batch = self._batches_by_txn_id[txn_id]
batch_id = batch.header_signature
annotated_batch = self._batches_by_id[batch_id]
if not annotated_batch.preserve:
incomplete_batches.add(batch_id)
# These transactions were in flight.
in_flight = set(self._transactions.keys()).difference(
self._txn_results.keys())
for txn_id in in_flight:
batch = self._batches_by_txn_id[txn_id]
batch_id = batch.header_signature
annotated_batch = self._batches_by_id[batch_id]
if not annotated_batch.preserve:
incomplete_batches.add(batch_id)
# clean up the batches, including partial complete information
for batch_id in incomplete_batches:
annotated_batch = self._batches_by_id[batch_id]
self._batches.remove(annotated_batch.batch)
del self._batches_by_id[batch_id]
for txn in annotated_batch.batch.transactions:
txn_id = txn.header_signature
del self._batches_by_txn_id[txn_id]
if txn_id in self._txn_results:
del self._txn_results[txn_id]
if txn_id in self._txns_available:
del self._txns_available[txn_id]
if txn_id in self._outstanding:
self._outstanding.remove(txn_id)
self._condition.notify_all()
if incomplete_batches:
LOGGER.debug('Removed %s incomplete batches from the schedule',
len(incomplete_batches))
def is_transaction_in_schedule(self, txn_signature):
with self._condition:
return txn_signature in self._batches_by_txn_id
def finalize(self):
with self._condition:
self._final = True
self._condition.notify_all()
def _complete(self):
return self._final and \
len(self._txn_results) == len(self._batches_by_txn_id)
def complete(self, block=True):
with self._condition:
if self._complete():
return True
if block:
return self._condition.wait_for(self._complete)
return False
def __del__(self):
self.cancel()
def __iter__(self):
return SchedulerIterator(self, self._condition)
def count(self):
with self._condition:
return len(self._scheduled)
def get_transaction(self, index):
with self._condition:
return self._scheduled_txn_info[self._scheduled[index]]
def cancel(self):
with self._condition:
if not self._cancelled and not self._final:
contexts = [
tr.context_id for tr in self._txn_results.values()
if tr.context_id
]
self._squash(
self._first_state_hash,
contexts,
persist=False,
clean_up=True)
self._cancelled = True
self._condition.notify_all()
def is_cancelled(self):
with self._condition:
return self._cancelled
def _first(iterator):
try:
return next(iterator)
except StopIteration:
return None
class _UnscheduledTransactionError(Exception):
"""Thrown when information on a transaction is requested, but the
transaction has been unscheduled.
"""
pass
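# A minimal, self-contained sketch (all names hypothetical, independent of the
# classes above) of the replay rule used by this scheduler: when a batch fails,
# any already-scheduled transaction from a *different* batch that depends on a
# txn in the failed batch is selected to be replayed, each at most once.
def _replay_candidates(failed_batch_txns, scheduled, predecessors_of, batch_of):
    seen = []
    for failed_txn in failed_batch_txns:
        for successor in list(scheduled):
            depends_on_failed = failed_txn in predecessors_of.get(successor, set())
            in_other_batch = batch_of.get(successor) != batch_of.get(failed_txn)
            if depends_on_failed and in_other_batch and successor not in seen:
                seen.append(successor)
    return seen
# _replay_candidates(['T1'], ['T1', 'T2', 'T3'],
#                    {'T2': {'T1'}, 'T3': set()},
#                    {'T1': 'B1', 'T2': 'B2', 'T3': 'B3'})  -> ['T2']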
| en | 0.933242 | # Copyright 2016-2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------------ This tree is a prefix tree: a node's address is always a strict prefix of the addresses of its children, and every node either has data or has multiple children. # A node's address is always a proper prefix of the addresses # of its children, so we only need to check the ordering. A # more explicit but also more verbose and probably slower # check would be: # # while address != node.address and address.startswith(node.address): # Walk to ADDRESS, creating nodes if necessary, and set the data there to UPDATER(data). Arguments: address (str): the address to be updated Remove all children (and descendants) below ADDRESS. Arguments: address (str): the address to be pruned Returns a stream of pairs of node addresses and data, raising AddressNotInTree if ADDRESS is not in the tree. First the ancestors of ADDRESS (including itself) are yielded, earliest to latest, and then the descendants of ADDRESS are yielded in an unspecified order. Arguments: address (str): the address to be walked # Walk as far down the tree as possible. If the desired # address is reached, return that node. Otherwise, add a new # one. # The rest of the function deals with adding a new node, # but there's no sense adding a level of indentation, so # just pass here. # The address isn't in the tree, so a new node will be added # one way or another. # Try to get the next child with a matching prefix. # There's no match, so just add the new address as a child. # If node address is 'rustic' and the address being added is # 'rust', then 'rust' will be the intermediate node taking # 'rustic' as a child. # The address and the match address share a common prefix, so # an intermediate node with the prefix as its address will # take them both as children. Returns all predecessor transaction ids for a write of the provided address. Arguments: address (str): the radix address Returns: a set of transaction ids # A write operation must be preceded by: # - The "enclosing writer", which is the writer at the address or # the nearest writer higher (closer to the root) in the tree. # - The "enclosing readers", which are the readers at the address # or higher in the tree. # - The "children writers", which include all writers which are # lower in the tree than the address. # - The "children readers", which include all readers which are # lower in the tree than the address. # # The enclosing writer must be added as it may have modified a node # which must not happen after the current write. # # Writers which are higher in the tree than the enclosing writer may # have modified a node at or under the given address. However, we do # not need to include them here as they will have been considered a # predecessor to the enclosing writer. # # Enclosing readers must be included. 
Technically, we only need to add # enclosing readers which occurred after the enclosing writer, since # the readers preceding the writer will have been considered a # predecessor of the enclosing writer. However, with the current # data structure we can not determine the difference between readers # so we specify them all; this is mostly harmless as it will not change # the eventual sort order generated by the scheduler. # # Children readers must be added, since their reads must happen prior # to the write. # First, walk down from the root to the address, collecting all readers # and updating the enclosing_writer if needed. # If the address isn't on the tree, then there aren't any # predecessors below the node to worry about (because there # isn't anything at all), so return the predecessors that have # already been collected. # Next, descend down the tree starting at the address node and # find all descendant readers and writers. Returns all predecessor transaction ids for a read of the provided address. Arguments: address (str): the radix address Returns: a set of transaction ids # A read operation must be preceded by: # - The "enclosing writer", which is the writer at the address or # the nearest writer higher (closer to the root) in the tree. # - All "children writers", which include all writers which are # lower in the tree than the address. # # The enclosing writer must be added as it is possible it updated the # contents stored at address. # # Writers which are higher in the tree than the enclosing writer may # have modified the address. However, we do not need to include them # here as they will have been considered a predecessor to the enclosing # writer. # # Children writers must be included as they may have updated addresses # lower in the tree, and these writers will have always been preceded # by the enclosing writer. # # We do not need to add any readers, since a reader cannot impact the # value which we are reading. The relationship is transitive, in that # this reader will also not impact the readers already recorded in the # tree. # First, walk down from the root to the address, updating the # enclosing_writer if needed. # If the address isn't on the tree, then there aren't any # predecessors below the node to worry about (because there # isn't anything at all), so return the predecessors that have # already been collected. # Next, descend down the tree starting at the address node and # find all descendant writers. Add a predecessor-successor relationship between one txn id and a set of predecessors. Args: txn_id (str): The transaction id of the transaction. predecessors (set): The transaction ids of the transaction's predecessors Returns: None Returns whether the predecessor is a predecessor or a predecessor of a predecessor...of any of the others. Args: predecessor (str): The txn id of the predecessor. others (list(str)): The txn id of the successor. Returns: (bool) # Transaction identifiers which have been scheduled. Stored as a list, # since order is important; SchedulerIterator instances, for example, # must all return scheduled transactions in the same order. # Transactions that must be replayed but the prior result hasn't # been returned yet. # Batch id for the batch with the property that the batch doesn't have # all txn results, and all batches prior to it have all their txn # results. # A dict of transaction id to TxnInformation objects, containing all # transactions present in self._scheduled. 
# All batches in their natural order (the order they were added to # the scheduler. # The batches that have state hashes added in add_batch, used in # Block validation. # Indexes to find a batch quickly # Transaction results Use the predecessor tree to find dependencies based on inputs. Returns: A list of transaction ids. Use the predecessor tree to find dependencies based on outputs. Returns: A list of transaction ids. # If this is the first non-required batch, it is preserved for # the schedule to be completed (i.e. no empty schedules in the # event of unschedule_incomplete_batches being called before # the first batch is completed). # For dependency handling: First, we determine our dependencies # based on the current state of the predecessor tree. Second, # we update the predecessor tree with reader and writer # information based on input and outputs. # Calculate predecessors (transaction ids which must come # prior to the current transaction). # Update our internal state with the computed predecessors. # Update the predecessor tree. # # Order of reader/writer operations is relevant. A writer # may overshadow a reader. For example, if the transaction # has the same input/output address, the end result will be # this writer (txn.header_signature) stored at the address of # the predecessor tree. The reader information will have been # discarded. Write operations to partial addresses will also # overshadow entire parts of the predecessor tree. # # Thus, the order here (inputs then outputs) will cause the # minimal amount of relevant information to be stored in the # predecessor tree, with duplicate information being # automatically discarded by the set_writer() call. Starting with the batch referenced by batch_signature, iterate back through the batches and for each valid batch collect the context_id. At the end remove contexts for txns that are other txn's predecessors. Args: batch_signature (str): The batch to start from, moving back through the batches in the scheduler Returns: (list): Context ids that haven't been previous base contexts. # This method calculates the BatchExecutionResult on the fly, # where only the TxnExecutionResults are cached, instead # of BatchExecutionResults, as in the SerialScheduler Decide if possible_successor should be replayed. Args: txn_id (str): Id of txn in failed batch. possible_successor (str): Id of txn to possibly replay. already_seen (list): A list of possible_successors that have been replayed. Returns: (bool): If the possible_successor should be replayed. Remove transactions from scheduled and txn_results for successors of txns in a failed batch. These transactions will now, or in the future be rescheduled in next_transaction; giving a replay ability. Args: sig (str): Transaction header signature Set the first batch id that doesn't have all results. Args: txn_signature (str): The txn identifier of the transaction with results being set. # Test to see if all batches from the least_batch to # the prior batch to the current batch have results. # Find the first batch from the current batch on, that doesn't have # all results. # Since get_initial_state_for_transaction gets context ids not # just from predecessors but also in the case of an enclosing # writer failing, predecessors of that predecessor, this extra # check is needed. Returns whether the transaction is in a valid batch. Args: txn_id (str): The transaction header signature. Returns: (bool): True if the txn's batch is valid, False otherwise. 
# Return whether every transaction in the batch with a # transaction result is valid # Collect contexts that this transaction depends upon # We assume that all prior txns in the batch are valid # or else this transaction wouldn't run. We assume that # the mechanism in next_transaction makes sure that each # predecessor txn has a result. Also any explicit # dependencies that could have failed this txn did so. # We return the next transaction which hasn't been scheduled and # is not blocked by a dependency. # We return the next transaction which hasn't been scheduled and # is not blocked by a dependency. # These transactions have never been scheduled. # These transactions were in flight. # clean up the batches, including partial complete information Thrown when information on a transaction is requested, but the transaction has been unscheduled. | 2.154305 | 2 |
ntscQT.py | JargeZ/vhs | 43 | 6633091 | import os
import sys
from pathlib import Path
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QLibraryInfo
from app import NtscApp
from app import logger
os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = QLibraryInfo.location(
QLibraryInfo.PluginsPath
)
def crash_handler(type, value, tb):
logger.trace(value)
logger.exception("Uncaught exception: {0}".format(str(value)))
sys.exit(1)
# Install exception handler
sys.excepthook = crash_handler
def main():
translator = QtCore.QTranslator()
locale = QtCore.QLocale.system().name()
# if run by pyinstaller executable, frozen attr will be true
    if getattr(sys, 'frozen', False):
        # _MEIPASS contain temp pyinstaller dir
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).absolute().parent
    locale_file = str((base_dir / 'translate' / f'{locale}.qm').resolve())
print(f"Try load {locale} locale: {locale_file}")
if translator.load(locale_file):
print(f'Localization loaded: {locale}') # name, dir
else:
print("Using default translation")
    app = QtWidgets.QApplication(sys.argv)  # New QApplication instance
app.installTranslator(translator)
window = NtscApp()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
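# A small sketch of the bundled-resource pattern used in main() above: when the
# app is frozen by PyInstaller, data files are unpacked under sys._MEIPASS,
# otherwise they sit next to the source file. The helper name below is ours,
# not part of this project.
def resource_path(*parts):
    if getattr(sys, 'frozen', False):
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).absolute().parent
    return str(base_dir.joinpath(*parts).resolve())
# e.g. resource_path('translate', 'en_US.qm')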
| import os
import sys
from pathlib import Path
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QLibraryInfo
from app import NtscApp
from app import logger
os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = QLibraryInfo.location(
QLibraryInfo.PluginsPath
)
def crash_handler(type, value, tb):
logger.trace(value)
logger.exception("Uncaught exception: {0}".format(str(value)))
sys.exit(1)
# Install exception handler
sys.excepthook = crash_handler
def main():
translator = QtCore.QTranslator()
locale = QtCore.QLocale.system().name()
# if run by pyinstaller executable, frozen attr will be true
    if getattr(sys, 'frozen', False):
        # _MEIPASS contain temp pyinstaller dir
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).absolute().parent
    locale_file = str((base_dir / 'translate' / f'{locale}.qm').resolve())
print(f"Try load {locale} locale: {locale_file}")
if translator.load(locale_file):
print(f'Localization loaded: {locale}') # name, dir
else:
print("Using default translation")
    app = QtWidgets.QApplication(sys.argv)  # New QApplication instance
app.installTranslator(translator)
window = NtscApp()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| en | 0.312087 | # Install exception handler # if run by pyinstaller executable, frozen attr will be true # _MEIPASS contain temp pyinstaller dir # name, dir # Новый экземпляр QApplication | 2.168869 | 2 |
python/models/bilstm5.py | mega002/DANN-MNLI | 0 | 6633092 | import tensorflow as tf
from util import blocks
from util.flip_gradient import flip_gradient
class MyModel(object):
def __init__(self, seq_length, emb_dim, hidden_dim, embeddings, emb_train):
## Define hyperparameters
self.embedding_dim = emb_dim
self.dim = hidden_dim
self.sequence_length = seq_length
## Define the placeholders
self.premise_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='premise_x')
self.hypothesis_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='hypothesis_x')
self.y = tf.placeholder(tf.int32, [None], name='y')
self.d = tf.placeholder(tf.int32, [None], name='d')
self.keep_rate_ph = tf.placeholder(tf.float32, [], name='keep_rate_ph')
self.train = tf.placeholder(tf.bool, [], name='train')
### Feature extractor
with tf.variable_scope('feature_extractor'):
self.E = tf.Variable(embeddings, trainable=emb_train, name='E')
            ## Function for embedding lookup and dropout at embedding layer
def emb_drop(x):
emb = tf.nn.embedding_lookup(self.E, x)
emb_drop = tf.nn.dropout(emb, self.keep_rate_ph)
return emb_drop
# Get lengths of unpadded sentences
prem_seq_lengths, prem_mask = blocks.length(self.premise_x)
hyp_seq_lengths, hyp_mask = blocks.length(self.hypothesis_x)
### BiLSTM layer ###
premise_in = emb_drop(self.premise_x)
hypothesis_in = emb_drop(self.hypothesis_x)
premise_outs, c1 = blocks.biLSTM(premise_in, dim=self.dim, seq_len=prem_seq_lengths, name='premise')
hypothesis_outs, c2 = blocks.biLSTM(hypothesis_in, dim=self.dim, seq_len=hyp_seq_lengths, name='hypothesis')
premise_bi = tf.concat(premise_outs, axis=2, name='premise_bi')
hypothesis_bi = tf.concat(hypothesis_outs, axis=2, name='hypothesis_bi')
#premise_final = blocks.last_output(premise_bi, prem_seq_lengths)
#hypothesis_final = blocks.last_output(hypothesis_bi, hyp_seq_lengths)
### Mean pooling
premise_sum = tf.reduce_sum(premise_bi, 1)
premise_ave = tf.div(premise_sum, tf.expand_dims(tf.cast(prem_seq_lengths, tf.float32), -1), name='premise_ave')
hypothesis_sum = tf.reduce_sum(hypothesis_bi, 1)
hypothesis_ave = tf.div(hypothesis_sum, tf.expand_dims(tf.cast(hyp_seq_lengths, tf.float32), -1), name='hypothesis_ave')
### Mou et al. concat layer ###
diff = tf.subtract(premise_ave, hypothesis_ave, name='diff')
mul = tf.multiply(premise_ave, hypothesis_ave, name='mul')
self.h = tf.concat([premise_ave, hypothesis_ave, diff, mul], 1, name='h')
### Label predictor
with tf.variable_scope('label_predictor'):
# Variables
self.W_pred_mlp = tf.Variable(tf.random_normal([self.dim * 8, self.dim], stddev=0.1), name='W_pred_mlp')
self.b_pred_mlp = tf.Variable(tf.random_normal([self.dim], stddev=0.1), name='b_pred_mlp')
self.W_pred_cl = tf.Variable(tf.random_normal([self.dim, 3], stddev=0.1), name='W_pred_cl')
self.b_pred_cl = tf.Variable(tf.random_normal([3], stddev=0.1), name='b_pred_cl')
# MLP layer
h_pred_mlp = tf.nn.relu(tf.matmul(self.h, self.W_pred_mlp) + self.b_pred_mlp)
# Dropout applied to classifier
h_pred_drop = tf.nn.dropout(h_pred_mlp, self.keep_rate_ph)
# Get prediction
self.pred_logits = tf.matmul(h_pred_drop, self.W_pred_cl) + self.b_pred_cl
# Define the cost function
self.pred_cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.pred_logits))
tf.summary.scalar('pred_loss', self.pred_cost)
### Domain classifier
with tf.variable_scope('domain_classifier'):
# Flip the gradient when backpropagating through this operation
h_ = flip_gradient(self.h)
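            # flip_gradient implements the DANN gradient-reversal trick: the
            # forward pass leaves self.h (roughly) unchanged, while the
            # backward pass negates (and typically scales) the gradient, so
            # the shared feature extractor is pushed to confuse this domain
            # classifier while the classifier still learns to separate domains.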
# Variables
self.W_domain_mlp = tf.Variable(tf.random_normal([self.dim * 8, self.dim], stddev=0.1), name='W_domain_mlp')
self.b_domain_mlp = tf.Variable(tf.random_normal([self.dim], stddev=0.1), name='b_domain_mlp')
self.W_domain_cl = tf.Variable(tf.random_normal([self.dim, 5], stddev=0.1), name='W_domain_cl')
self.b_domain_cl = tf.Variable(tf.random_normal([5], stddev=0.1), name='b_domain_cl')
# MLP layer
h_domain_mlp = tf.nn.relu(tf.add(tf.matmul(h_, self.W_domain_mlp), self.b_domain_mlp))
# Dropout applied to classifier
h_domain_drop = tf.nn.dropout(h_domain_mlp, self.keep_rate_ph)
# Get prediction
self.domain_logits = tf.add(tf.matmul(h_domain_drop, self.W_domain_cl), self.b_domain_cl)
domain_probs = tf.nn.softmax(self.domain_logits)
#self.domain_neg_entropy = tf.reduce_mean(tf.reduce_sum(tf.multiply(domain_probs, log2(domain_probs)), axis=1, name='domain_neg_ent'))
self.domain_neg_entropy = tf.reduce_mean(tf.reduce_sum(
tf.multiply(domain_probs, tf.log(tf.clip_by_value(domain_probs, 1e-10, 1.0))),
axis=1, name='domain_neg_ent'))
tf.summary.scalar('domain_neg_entropy', self.domain_neg_entropy)
# Define the cost function
self.domain_cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.d, logits=self.domain_logits))
tf.summary.scalar('domain_loss', self.domain_cost)
'''
def log2(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(2, dtype=numerator.dtype))
return numerator / denominator
''' | import tensorflow as tf
from util import blocks
from util.flip_gradient import flip_gradient
class MyModel(object):
def __init__(self, seq_length, emb_dim, hidden_dim, embeddings, emb_train):
## Define hyperparameters
self.embedding_dim = emb_dim
self.dim = hidden_dim
self.sequence_length = seq_length
## Define the placeholders
self.premise_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='premise_x')
self.hypothesis_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='hypothesis_x')
self.y = tf.placeholder(tf.int32, [None], name='y')
self.d = tf.placeholder(tf.int32, [None], name='d')
self.keep_rate_ph = tf.placeholder(tf.float32, [], name='keep_rate_ph')
self.train = tf.placeholder(tf.bool, [], name='train')
### Feature extractor
with tf.variable_scope('feature_extractor'):
self.E = tf.Variable(embeddings, trainable=emb_train, name='E')
            ## Function for embedding lookup and dropout at embedding layer
def emb_drop(x):
emb = tf.nn.embedding_lookup(self.E, x)
emb_drop = tf.nn.dropout(emb, self.keep_rate_ph)
return emb_drop
# Get lengths of unpadded sentences
prem_seq_lengths, prem_mask = blocks.length(self.premise_x)
hyp_seq_lengths, hyp_mask = blocks.length(self.hypothesis_x)
### BiLSTM layer ###
premise_in = emb_drop(self.premise_x)
hypothesis_in = emb_drop(self.hypothesis_x)
premise_outs, c1 = blocks.biLSTM(premise_in, dim=self.dim, seq_len=prem_seq_lengths, name='premise')
hypothesis_outs, c2 = blocks.biLSTM(hypothesis_in, dim=self.dim, seq_len=hyp_seq_lengths, name='hypothesis')
premise_bi = tf.concat(premise_outs, axis=2, name='premise_bi')
hypothesis_bi = tf.concat(hypothesis_outs, axis=2, name='hypothesis_bi')
#premise_final = blocks.last_output(premise_bi, prem_seq_lengths)
#hypothesis_final = blocks.last_output(hypothesis_bi, hyp_seq_lengths)
### Mean pooling
premise_sum = tf.reduce_sum(premise_bi, 1)
premise_ave = tf.div(premise_sum, tf.expand_dims(tf.cast(prem_seq_lengths, tf.float32), -1), name='premise_ave')
hypothesis_sum = tf.reduce_sum(hypothesis_bi, 1)
hypothesis_ave = tf.div(hypothesis_sum, tf.expand_dims(tf.cast(hyp_seq_lengths, tf.float32), -1), name='hypothesis_ave')
### Mou et al. concat layer ###
diff = tf.subtract(premise_ave, hypothesis_ave, name='diff')
mul = tf.multiply(premise_ave, hypothesis_ave, name='mul')
self.h = tf.concat([premise_ave, hypothesis_ave, diff, mul], 1, name='h')
### Label predictor
with tf.variable_scope('label_predictor'):
# Variables
self.W_pred_mlp = tf.Variable(tf.random_normal([self.dim * 8, self.dim], stddev=0.1), name='W_pred_mlp')
self.b_pred_mlp = tf.Variable(tf.random_normal([self.dim], stddev=0.1), name='b_pred_mlp')
self.W_pred_cl = tf.Variable(tf.random_normal([self.dim, 3], stddev=0.1), name='W_pred_cl')
self.b_pred_cl = tf.Variable(tf.random_normal([3], stddev=0.1), name='b_pred_cl')
# MLP layer
h_pred_mlp = tf.nn.relu(tf.matmul(self.h, self.W_pred_mlp) + self.b_pred_mlp)
# Dropout applied to classifier
h_pred_drop = tf.nn.dropout(h_pred_mlp, self.keep_rate_ph)
# Get prediction
self.pred_logits = tf.matmul(h_pred_drop, self.W_pred_cl) + self.b_pred_cl
# Define the cost function
self.pred_cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.pred_logits))
tf.summary.scalar('pred_loss', self.pred_cost)
### Domain classifier
with tf.variable_scope('domain_classifier'):
# Flip the gradient when backpropagating through this operation
h_ = flip_gradient(self.h)
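            # flip_gradient implements the DANN gradient-reversal trick: the
            # forward pass leaves self.h (roughly) unchanged, while the
            # backward pass negates (and typically scales) the gradient, so
            # the shared feature extractor is pushed to confuse this domain
            # classifier while the classifier still learns to separate domains.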
# Variables
self.W_domain_mlp = tf.Variable(tf.random_normal([self.dim * 8, self.dim], stddev=0.1), name='W_domain_mlp')
self.b_domain_mlp = tf.Variable(tf.random_normal([self.dim], stddev=0.1), name='b_domain_mlp')
self.W_domain_cl = tf.Variable(tf.random_normal([self.dim, 5], stddev=0.1), name='W_domain_cl')
self.b_domain_cl = tf.Variable(tf.random_normal([5], stddev=0.1), name='b_domain_cl')
# MLP layer
h_domain_mlp = tf.nn.relu(tf.add(tf.matmul(h_, self.W_domain_mlp), self.b_domain_mlp))
# Dropout applied to classifier
h_domain_drop = tf.nn.dropout(h_domain_mlp, self.keep_rate_ph)
# Get prediction
self.domain_logits = tf.add(tf.matmul(h_domain_drop, self.W_domain_cl), self.b_domain_cl)
domain_probs = tf.nn.softmax(self.domain_logits)
#self.domain_neg_entropy = tf.reduce_mean(tf.reduce_sum(tf.multiply(domain_probs, log2(domain_probs)), axis=1, name='domain_neg_ent'))
self.domain_neg_entropy = tf.reduce_mean(tf.reduce_sum(
tf.multiply(domain_probs, tf.log(tf.clip_by_value(domain_probs, 1e-10, 1.0))),
axis=1, name='domain_neg_ent'))
tf.summary.scalar('domain_neg_entropy', self.domain_neg_entropy)
# Define the cost function
self.domain_cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.d, logits=self.domain_logits))
tf.summary.scalar('domain_loss', self.domain_cost)
'''
def log2(x):
numerator = tf.log(x)
denominator = tf.log(tf.constant(2, dtype=numerator.dtype))
return numerator / denominator
''' | en | 0.443752 | ## Define hyperparameters ## Define the placeholders ### Feature extractor ## Fucntion for embedding lookup and dropout at embedding layer # Get lengths of unpadded sentences ### BiLSTM layer ### #premise_final = blocks.last_output(premise_bi, prem_seq_lengths) #hypothesis_final = blocks.last_output(hypothesis_bi, hyp_seq_lengths) ### Mean pooling ### Mou et al. concat layer ### ### Label predictor # Variables # MLP layer # Dropout applied to classifier # Get prediction # Define the cost function ### Domain classifier # Flip the gradient when backpropagating through this operation # Variables # MLP layer # Dropout applied to classifier # Get prediction #self.domain_neg_entropy = tf.reduce_mean(tf.reduce_sum(tf.multiply(domain_probs, log2(domain_probs)), axis=1, name='domain_neg_ent')) # Define the cost function def log2(x): numerator = tf.log(x) denominator = tf.log(tf.constant(2, dtype=numerator.dtype)) return numerator / denominator | 2.37166 | 2 |
lib/spack/spack/cmd/unuse.py | HaochengLIU/spack | 1 | 6633093 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "remove package from environment using dotkit"
section = "modules"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to unuse with dotkit')
def unuse(parser, args):
print_module_placeholder_help()
| # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import argparse
from spack.cmd.common import print_module_placeholder_help
description = "remove package from environment using dotkit"
section = "modules"
level = "long"
def setup_parser(subparser):
"""Parser is only constructed so that this prints a nice help
message with -h. """
subparser.add_argument(
'spec', nargs=argparse.REMAINDER,
help='spec of package to unuse with dotkit')
def unuse(parser, args):
print_module_placeholder_help()
| en | 0.743565 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) Parser is only constructed so that this prints a nice help message with -h. | 1.820526 | 2 |
mmtf/codecs/__init__.py | jonwedell/mmtf-python | 36 | 6633094 | from .default_codec import decode_array,encode_array | from .default_codec import decode_array,encode_array | none | 1 | 1.083258 | 1 |
|
feed_fwd_NN_from_scratch.py | Prashant47/deep-learning-elu | 0 | 6633095 | <reponame>Prashant47/deep-learning-elu<filename>feed_fwd_NN_from_scratch.py
import numpy as np  # needed for the array math used throughout the class
class Feed_fwd_nn:
n_hidden_layers = 0
n_hidden_diamensions = []
n_input = 0
n_output = 0
weights = []
biases = []
diamensions = []
activations = []
delta_weights = []
delta_biases = []
def __init__(self, n_input, n_output, n_hidden_diamensions):
np.random.seed(0)
self.n_hidden_layers = len(n_hidden_diamensions)
self.n_hidden_diamensions = n_hidden_diamensions
self.n_input = n_input
self.n_output = n_output
diamensions = []
diamensions.append(n_input)
diamensions.extend(n_hidden_diamensions)
diamensions.append(n_output)
self.diamensions = diamensions
for i in range(len(self.diamensions)-1):
weight = np.random.randn(self.diamensions[i], self.diamensions[i+1]) / np.sqrt(self.diamensions[i])
bias = np.zeros((1, self.diamensions[i+1]))
self.weights.append(weight)
self.biases.append(bias)
self.delta_weights = [np.zeros(w.shape) for w in self.weights]
self.delta_biases = [np.zeros(b.shape) for b in self.biases]
def forward_propagate(self, x):
activations = []
z_list = []
a = np.array(x)
activations.append(a)
## forward propagation
for i in range(len(self.diamensions)-1):
z = a.dot(self.weights[i]) + self.biases[i]
z_list.append(z)
a = np.tanh(z)
activations.append(a)
exp_scores = np.exp(a)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        print(activations)
return z_list, activations
def predict(self, x):
a = np.array(x)
self.activations.append(a)
## forward propagation
for i in range(len(self.diamensions)-1):
z = a.dot(self.weights[i]) + self.biases[i]
a = np.tanh(z)
self.activations.append(a)
#print self.activations
exp_scores = np.exp(a)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
# learning rate,
def fit(self, X_train, y_train, num_iterations = 20000, learning_rate = 0.01, regularization = 0.01):
for i in range(num_iterations):
z, activations = self.forward_propagate(X_train)
delta = self.cost_prime(activations[-1], y_train) * self.sigmoid_prime(z[-1])
self.delta_biases[-1] = delta
self.delta_weights[-1] = np.dot(delta, activations[-1].transpose())
            for j in range(2, len(self.diamensions)):
z_value = z[-j]
sp = self.sigmoid_prime(z_value)
delta = np.dot(delta, self.weights[-j+1].transpose()) * sp
self.delta_biases[-j] = delta
self.delta_weights[-j] = np.dot(delta, activations[-j].transpose())
self.biases[-j] += self.delta_biases[-j]
self.weights[-j] += self.delta_weights[-j]
            if i % 1000 == 0:
                print("iteration : " + str(i))
def cost_prime(self, calculated, observed):
return observed - calculated
def sigmoid(self, z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(self, z):
return self.sigmoid(z)*(1-self.sigmoid(z))
def score(self):
pass
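# A minimal, self-contained reference sketch (not the author's implementation)
# of one gradient-descent step for a stack of tanh layers, mainly to make the
# expected gradient shapes explicit; all argument names are ours.
def backprop_step(weights, biases, activations, y, learning_rate=0.01):
    # activations[k]: (n_samples, dim_k); weights[k]: (dim_k, dim_{k+1});
    # activations[0] is the input batch and activations[-1] the tanh output.
    deltas = [None] * len(weights)
    deltas[-1] = (activations[-1] - y) * (1.0 - activations[-1] ** 2)
    for k in range(len(weights) - 2, -1, -1):
        deltas[k] = deltas[k + 1].dot(weights[k + 1].T) * (1.0 - activations[k + 1] ** 2)
    for k in range(len(weights)):
        weights[k] -= learning_rate * activations[k].T.dot(deltas[k])
        biases[k] -= learning_rate * deltas[k].sum(axis=0, keepdims=True)
    return weights, biases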
 | import numpy as np  # needed for the array math used throughout the class
class Feed_fwd_nn:
n_hidden_layers = 0
n_hidden_diamensions = []
n_input = 0
n_output = 0
weights = []
biases = []
diamensions = []
activations = []
delta_weights = []
delta_biases = []
def __init__(self, n_input, n_output, n_hidden_diamensions):
np.random.seed(0)
self.n_hidden_layers = len(n_hidden_diamensions)
self.n_hidden_diamensions = n_hidden_diamensions
self.n_input = n_input
self.n_output = n_output
diamensions = []
diamensions.append(n_input)
diamensions.extend(n_hidden_diamensions)
diamensions.append(n_output)
self.diamensions = diamensions
for i in range(len(self.diamensions)-1):
weight = np.random.randn(self.diamensions[i], self.diamensions[i+1]) / np.sqrt(self.diamensions[i])
bias = np.zeros((1, self.diamensions[i+1]))
self.weights.append(weight)
self.biases.append(bias)
self.delta_weights = [np.zeros(w.shape) for w in self.weights]
self.delta_biases = [np.zeros(b.shape) for b in self.biases]
def forward_propagate(self, x):
activations = []
z_list = []
a = np.array(x)
activations.append(a)
## forward propagation
for i in range(len(self.diamensions)-1):
z = a.dot(self.weights[i]) + self.biases[i]
z_list.append(z)
a = np.tanh(z)
activations.append(a)
exp_scores = np.exp(a)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
        print(activations)
return z_list, activations
def predict(self, x):
a = np.array(x)
self.activations.append(a)
## forward propagation
for i in range(len(self.diamensions)-1):
z = a.dot(self.weights[i]) + self.biases[i]
a = np.tanh(z)
self.activations.append(a)
#print self.activations
exp_scores = np.exp(a)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
# learning rate,
def fit(self, X_train, y_train, num_iterations = 20000, learning_rate = 0.01, regularization = 0.01):
for i in range(num_iterations):
z, activations = self.forward_propagate(X_train)
delta = self.cost_prime(activations[-1], y_train) * self.sigmoid_prime(z[-1])
self.delta_biases[-1] = delta
self.delta_weights[-1] = np.dot(delta, activations[-1].transpose())
            for j in range(2, len(self.diamensions)):
z_value = z[-j]
sp = self.sigmoid_prime(z_value)
delta = np.dot(delta, self.weights[-j+1].transpose()) * sp
self.delta_biases[-j] = delta
self.delta_weights[-j] = np.dot(delta, activations[-j].transpose())
self.biases[-j] += self.delta_biases[-j]
self.weights[-j] += self.delta_weights[-j]
            if i % 1000 == 0:
                print("iteration : " + str(i))
def cost_prime(self, calculated, observed):
return observed - calculated
def sigmoid(self, z):
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(self, z):
return self.sigmoid(z)*(1-self.sigmoid(z))
def score(self):
pass | en | 0.613174 | ## forward propagation ## forward propagation #print self.activations # learning rate, | 2.325847 | 2 |
gramex/http.py | NAnnamalai/gramex | 130 | 6633096 | <filename>gramex/http.py<gh_stars>100-1000
'''
Standard (and some non-standard) HTTP codes.
We don't use six.moves.http_client because it doesn't contain codes like:
CLIENT_TIMEOUT, RATE_LIMITED, TOO_MANY_REQUESTS. Here, we add them all.
'''
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
RATE_LIMITED = 420
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
TOO_MANY_REQUESTS = 429
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
CLIENT_TIMEOUT = 599
| <filename>gramex/http.py<gh_stars>100-1000
'''
Standard (and some non-standard) HTTP codes.
We don't use six.moves.http_client because it doesn't contain codes like:
CLIENT_TIMEOUT, RATE_LIMITED, TOO_MANY_REQUESTS. Here, we add them all.
'''
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
RATE_LIMITED = 420
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
TOO_MANY_REQUESTS = 429
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
CLIENT_TIMEOUT = 599
| en | 0.734227 | Standard (and some non-standard) HTTP codes. We don't use six.moves.http_client because it doesn't contain codes like: CLIENT_TIMEOUT, RATE_LIMITED, TOO_MANY_REQUESTS. Here, we add them all. # status codes # informational # successful # redirection # client error # server error | 1.471764 | 1 |
tests/test_launch_game.py | deterok/galaxy-integrations-python-api | 1,165 | 6633097 | <reponame>deterok/galaxy-integrations-python-api
import pytest
from galaxy.unittest.mock import async_return_value
from tests import create_message
@pytest.mark.asyncio
async def test_success(plugin, read):
request = {
"jsonrpc": "2.0",
"method": "launch_game",
"params": {
"game_id": "3"
}
}
read.side_effect = [async_return_value(create_message(request)), async_return_value(b"")]
await plugin.run()
plugin.launch_game.assert_called_with(game_id="3")
| import pytest
from galaxy.unittest.mock import async_return_value
from tests import create_message
@pytest.mark.asyncio
async def test_success(plugin, read):
request = {
"jsonrpc": "2.0",
"method": "launch_game",
"params": {
"game_id": "3"
}
}
read.side_effect = [async_return_value(create_message(request)), async_return_value(b"")]
await plugin.run()
plugin.launch_game.assert_called_with(game_id="3") | none | 1 | 2.204856 | 2 |
|
changelog.py | Sheyin/Lurky | 0 | 6633098 | # Experimental bit to use requests module to read lurky's own changelog (from Github)
# Example code from requests (python module)
import requests
r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
print(r.status_code)
# Should read "200" if successful
print(r.headers['content-type'])
# Should read: 'application/json; charset=utf8'
print(r.encoding)
# Should read: 'utf-8'
print(r.text)
# Should read: u'{"type":"User"...'
print(r.json())
# Should read: {u'private_gists': 419, u'total_private_repos': 77, ...} | # Experimental bit to use requests module to read lurky's own changelog (from Github)
# Example code from requests (python module)
import requests
r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
print(r.status_code)
# Should read "200" if successful
print(r.headers['content-type'])
# Should read: 'application/json; charset=utf8'
print(r.encoding)
# Should read: 'utf-8'
print(r.text)
# Should read: u'{"type":"User"...'
print(r.json())
# Should read: {u'private_gists': 419, u'total_private_repos': 77, ...} | en | 0.713661 | # Experimental bit to use requests module to read lurky's own changelog (from Github) # Example code from requests (python module) # Should read "200" if successful # Should read: 'application/json; charset=utf8' # Should read: 'utf-8' # Should read: u'{"type":"User"...' # Should read: {u'private_gists': 419, u'total_private_repos': 77, ...} | 2.36602 | 2 |
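# A hedged follow-on sketch for the idea above: a changelog kept in a GitHub
# repository can be fetched as raw text via raw.githubusercontent.com. The
# branch and file name below are placeholders, not a confirmed location of
# Lurky's changelog.
import requests
def fetch_changelog(repo="Sheyin/Lurky", branch="master", path="CHANGELOG.md"):
    url = "https://raw.githubusercontent.com/{}/{}/{}".format(repo, branch, path)
    r = requests.get(url, timeout=10)
    r.raise_for_status()
    return r.text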
tilapia/lib/provider/exceptions.py | huazhouwang/python_multichain_wallet | 2 | 6633099 | <gh_stars>1-10
from typing import Any
class TransactionNotFound(Exception):
def __init__(self, txid: str):
super(TransactionNotFound, self).__init__(repr(txid))
self.txid = txid
class NoAvailableClient(Exception):
def __init__(self, chain_code: str, candidates: list, instance_required: Any):
super(NoAvailableClient, self).__init__(
f"chain_code: {repr(chain_code)}, candidates: {candidates}, instance_required: {instance_required}"
)
class ProviderClassNotFound(Exception):
def __init__(self, chain_code: str, path: str):
super(ProviderClassNotFound, self).__init__(f"chain_code: {repr(chain_code)}, path: {path}")
class UnknownBroadcastError(Exception):
def __init__(self, message: str):
super(UnknownBroadcastError, self).__init__(f"error message: {message}")
class TransactionAlreadyKnown(UnknownBroadcastError):
pass
class TransactionNonceTooLow(UnknownBroadcastError):
pass
class TransactionUnderpriced(UnknownBroadcastError):
pass
class TransactionGasTooLow(UnknownBroadcastError):
pass
class TransactionGasLimitExceeded(UnknownBroadcastError):
pass
class FailedToGetGasPrices(Exception):
def __init__(self):
super(FailedToGetGasPrices, self).__init__("Failed to get gas prices.")
class InsufficientBalance(Exception):
def __init__(self, _message: str):
super(InsufficientBalance, self).__init__("Insufficient funds")
class FailedToGetSuggestedParams(Exception):
def __init__(self):
super(FailedToGetSuggestedParams, self).__init__("Failed to get suggested params.")
| from typing import Any
class TransactionNotFound(Exception):
def __init__(self, txid: str):
super(TransactionNotFound, self).__init__(repr(txid))
self.txid = txid
class NoAvailableClient(Exception):
def __init__(self, chain_code: str, candidates: list, instance_required: Any):
super(NoAvailableClient, self).__init__(
f"chain_code: {repr(chain_code)}, candidates: {candidates}, instance_required: {instance_required}"
)
class ProviderClassNotFound(Exception):
def __init__(self, chain_code: str, path: str):
super(ProviderClassNotFound, self).__init__(f"chain_code: {repr(chain_code)}, path: {path}")
class UnknownBroadcastError(Exception):
def __init__(self, message: str):
super(UnknownBroadcastError, self).__init__(f"error message: {message}")
class TransactionAlreadyKnown(UnknownBroadcastError):
pass
class TransactionNonceTooLow(UnknownBroadcastError):
pass
class TransactionUnderpriced(UnknownBroadcastError):
pass
class TransactionGasTooLow(UnknownBroadcastError):
pass
class TransactionGasLimitExceeded(UnknownBroadcastError):
pass
class FailedToGetGasPrices(Exception):
def __init__(self):
super(FailedToGetGasPrices, self).__init__("Failed to get gas prices.")
class InsufficientBalance(Exception):
def __init__(self, _message: str):
super(InsufficientBalance, self).__init__("Insufficient funds")
class FailedToGetSuggestedParams(Exception):
def __init__(self):
super(FailedToGetSuggestedParams, self).__init__("Failed to get suggested params.") | none | 1 | 2.699286 | 3 |
|
alipay/aop/api/domain/AlipayOpenSmsgDataSetModel.py | snowxmas/alipay-sdk-python-all | 213 | 6633100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenSmsgDataSetModel(object):
def __init__(self):
self._paramone = None
self._paramtwo = None
@property
def paramone(self):
return self._paramone
@paramone.setter
def paramone(self, value):
self._paramone = value
@property
def paramtwo(self):
return self._paramtwo
@paramtwo.setter
def paramtwo(self, value):
self._paramtwo = value
def to_alipay_dict(self):
params = dict()
if self.paramone:
if hasattr(self.paramone, 'to_alipay_dict'):
params['paramone'] = self.paramone.to_alipay_dict()
else:
params['paramone'] = self.paramone
if self.paramtwo:
if hasattr(self.paramtwo, 'to_alipay_dict'):
params['paramtwo'] = self.paramtwo.to_alipay_dict()
else:
params['paramtwo'] = self.paramtwo
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenSmsgDataSetModel()
if 'paramone' in d:
o.paramone = d['paramone']
if 'paramtwo' in d:
o.paramtwo = d['paramtwo']
return o
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenSmsgDataSetModel(object):
def __init__(self):
self._paramone = None
self._paramtwo = None
@property
def paramone(self):
return self._paramone
@paramone.setter
def paramone(self, value):
self._paramone = value
@property
def paramtwo(self):
return self._paramtwo
@paramtwo.setter
def paramtwo(self, value):
self._paramtwo = value
def to_alipay_dict(self):
params = dict()
if self.paramone:
if hasattr(self.paramone, 'to_alipay_dict'):
params['paramone'] = self.paramone.to_alipay_dict()
else:
params['paramone'] = self.paramone
if self.paramtwo:
if hasattr(self.paramtwo, 'to_alipay_dict'):
params['paramtwo'] = self.paramtwo.to_alipay_dict()
else:
params['paramtwo'] = self.paramtwo
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenSmsgDataSetModel()
if 'paramone' in d:
o.paramone = d['paramone']
if 'paramtwo' in d:
o.paramtwo = d['paramtwo']
return o
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 2.276577 | 2 |
app_simulator/models/event.py | nicetester/newmonkey_tab | 0 | 6633101 | # -*- coding: UTF-8 -*-
class EventType(object):
CLICK = 'click'
class EventHandler(object):
def __init__(self, func, callback=None, *args, **kwargs):
self.func = func
self.callback = callback
self.args = args
self.kwargs = kwargs
def __str__(self):
return '%s' % self.kwargs
class BasicEventHandler(object):
def __init__(self):
pass
@staticmethod
def gen_push_activity(app, activity):
return EventHandler(lambda app, activity: app.push_activity(activity), app=app, activity=activity)
class MonkeyEvent(object):
@staticmethod
def get_event_identify(event_info):
SP = '^_^'
identify = event_info.get('event_name') + SP + event_info['event_data']['event_entity']['identify'] + SP + event_info.get('pre_activity') + SP + event_info.get('next_activity')
return identify
@staticmethod
def get_event_identify_from_sm(sm_record):
SP = '^_^'
identify = sm_record.get('event_name') + SP + sm_record.get('event_entity_identify') + SP + sm_record.get('pre_activity') + SP + sm_record.get('next_activity')
return identify
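# A small usage sketch (hypothetical event data, not from the project's
# fixtures): get_event_identify joins the event name, the entity identify and
# the two activity names with the '^_^' separator, e.g.
#   MonkeyEvent.get_event_identify({
#       'event_name': 'click',
#       'event_data': {'event_entity': {'identify': 'btn_login'}},
#       'pre_activity': 'LoginActivity',
#       'next_activity': 'MainActivity',
#   })
#   -> 'click^_^btn_login^_^LoginActivity^_^MainActivity'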
| # -*- coding: UTF-8 -*-
class EventType(object):
CLICK = 'click'
class EventHandler(object):
def __init__(self, func, callback=None, *args, **kwargs):
self.func = func
self.callback = callback
self.args = args
self.kwargs = kwargs
def __str__(self):
return '%s' % self.kwargs
class BasicEventHandler(object):
def __init__(self):
pass
@staticmethod
def gen_push_activity(app, activity):
return EventHandler(lambda app, activity: app.push_activity(activity), app=app, activity=activity)
class MonkeyEvent(object):
@staticmethod
def get_event_identify(event_info):
SP = '^_^'
identify = event_info.get('event_name') + SP + event_info['event_data']['event_entity']['identify'] + SP + event_info.get('pre_activity') + SP + event_info.get('next_activity')
return identify
@staticmethod
def get_event_identify_from_sm(sm_record):
SP = '^_^'
identify = sm_record.get('event_name') + SP + sm_record.get('event_entity_identify') + SP + sm_record.get('pre_activity') + SP + sm_record.get('next_activity')
return identify
| en | 0.222803 | # -*- coding: UTF-8 -*- | 2.676008 | 3 |
tests/reader/test_list_reader.py | ThisIsNima/dwetl | 1 | 6633102 | <gh_stars>1-10
import unittest
from dwetl.reader.list_reader import ListReader
class TestListReader(unittest.TestCase):
def test_simple_array(self):
array = [
'Line 1',
'Line 2'
]
reader = ListReader(array)
self.assertEqual(array[0], next(iter(reader)))
self.assertEqual(array[1], next(iter(reader)))
| import unittest
from dwetl.reader.list_reader import ListReader
class TestListReader(unittest.TestCase):
def test_simple_array(self):
array = [
'Line 1',
'Line 2'
]
reader = ListReader(array)
self.assertEqual(array[0], next(iter(reader)))
self.assertEqual(array[1], next(iter(reader))) | none | 1 | 2.809943 | 3 |
|
tests/components/lg_ess/__init__.py | gluap/home-assistant | 0 | 6633103 | """Tests for the LG ESS integration."""
| """Tests for the LG ESS integration."""
| en | 0.808366 | Tests for the LG ESS integration. | 0.964019 | 1 |
src/_dependencies/replace.py | nicoddemus/dependencies | 0 | 6633104 | from _dependencies.attributes import Attributes
from _dependencies.spec import make_dependency_spec
def deep_replace_dependency(injector, current_attr, replace):
spec = make_dependency_spec(current_attr, replace.dependency)
marker, attribute, args, have_defaults = spec
attribute = Attributes(attribute, replace.attrs)
spec = (marker, attribute, args, have_defaults)
for base in injector.__mro__:
if current_attr in base.__dependencies__:
base.__dependencies__[current_attr] = spec
else:
break
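# In short: deep_replace_dependency rebuilds the spec for `current_attr`,
# wrapping its attribute with the extra chain from `replace.attrs`, then walks
# the injector's __mro__ from the most derived class down, overwriting that
# entry in each base's __dependencies__ until it reaches the first base that
# does not define the dependency.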
| from _dependencies.attributes import Attributes
from _dependencies.spec import make_dependency_spec
def deep_replace_dependency(injector, current_attr, replace):
spec = make_dependency_spec(current_attr, replace.dependency)
marker, attribute, args, have_defaults = spec
attribute = Attributes(attribute, replace.attrs)
spec = (marker, attribute, args, have_defaults)
for base in injector.__mro__:
if current_attr in base.__dependencies__:
base.__dependencies__[current_attr] = spec
else:
break
| none | 1 | 2.15317 | 2 |
|
src/cookierec.py | Triballian/ordmon | 0 | 6633105 | <gh_stars>0
'''
Created on Mar 27, 2016
andy mckay recipe http://code.activestate.com/recipes/80443/
@author: Noe
'''
from string import lower, find
import re, os, glob
import win32api, win32con
def _getLocation():
''' Looks through the registry to find the current users Cookie folder. This is the folder IE uses. '''
key = 'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
regkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, key, 0, win32con.KEY_ALL_ACCESS)
num = win32api.RegQueryInfoKey(regkey)[1]
for x in range(0, num):
k = win32api.RegEnumValue(regkey, x)
if k[0] == 'Cookies':
return k[1]
def _getCookieFiles(location, name):
    ''' Rummages through all the files in the cookie folder, and returns only the ones whose file name contains name.
    Name can be the domain, for example 'activestate' will return all cookies for activestate.
    Unfortunately it will also return cookies for domains like activestate.foo.com, but that's highly unlikely. '''
filenm = os.path.join(location, '*%s*' % name)
files = glob.glob(filenm)
return files
def _findCookie(files, cookie_re):
''' Look through a group of files looking for a specific cookie,
when we find it return, which means the first one found '''
for file in files:
data = open(file, 'r').read()
m = cookie_re.search(data)
if m: return m.group(1)
def findIECookie(domain, cookie):
''' Finds the ASPN Cookie from IE cookie files '''
cookie_re = re.compile('%s\n(.*?)\n' % cookie)
try:
l = _getLocation()
except:
# just print a debug
print "Error pulling registry key"
return None
# Found the key, now find the files and look through them
f = _getCookieFiles(l, domain)
if f:
return _findCookie(f, cookie_re)
else:
print "No cookies for that domain found"
return None
if __name__=='__main__':
print _getLocation()
# print findIECookie(domain='kuro5hin', cookie='k5-new_session')
pass
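    # --- Added usage sketch (hedged; the domain and cookie name below are made up) ---
    # folder = _getLocation()                        # e.g. the per-user Cookies directory
    # files = _getCookieFiles(folder, 'example.com') # all cookie files mentioning the domain
    # print findIECookie(domain='example.com', cookie='sessionid')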
| '''
Created on Mar 27, 2016
andy mckay crecipe http://code.activestate.com/recipes/80443/
@author: Noe
'''
from string import lower, find
import re, os, glob
import win32api, win32con
def _getLocation():
    ''' Looks through the registry to find the current user's Cookie folder. This is the folder IE uses. '''
key = 'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
regkey = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, key, 0, win32con.KEY_ALL_ACCESS)
num = win32api.RegQueryInfoKey(regkey)[1]
for x in range(0, num):
k = win32api.RegEnumValue(regkey, x)
if k[0] == 'Cookies':
return k[1]
def _getCookieFiles(location, name):
    ''' Rummages through all the files in the cookie folder and returns only the ones whose file name contains name.
    Name can be the domain, for example 'activestate' will return all cookies for activestate.
    Unfortunately it will also return cookies for domains like activestate.foo.com, but that's highly unlikely. '''
filenm = os.path.join(location, '*%s*' % name)
files = glob.glob(filenm)
return files
def _findCookie(files, cookie_re):
    ''' Look through a group of files for a specific cookie;
    return as soon as it is found, i.e. the first match wins '''
for file in files:
data = open(file, 'r').read()
m = cookie_re.search(data)
if m: return m.group(1)
def findIECookie(domain, cookie):
''' Finds the ASPN Cookie from IE cookie files '''
cookie_re = re.compile('%s\n(.*?)\n' % cookie)
try:
l = _getLocation()
except:
# just print a debug
print "Error pulling registry key"
return None
# Found the key, now find the files and look through them
f = _getCookieFiles(l, domain)
if f:
return _findCookie(f, cookie_re)
else:
print "No cookies for that domain found"
return None
if __name__=='__main__':
print _getLocation()
# print findIECookie(domain='kuro5hin', cookie='k5-new_session')
pass | en | 0.845259 | Created on Mar 27, 2016 andy mckay crecipe http://code.activestate.com/recipes/80443/ @author: Noe Looks through the registry to find the current users Cookie folder. This is the folder IE uses. Rummages through all the files in the cookie folder, and returns only the ones whose file name, contains name. Name can be the domain, for example 'activestate' will return all cookies for activestate. Unfortunately it will also return cookies for domains like activestate.foo.com, but thats highly unlikely. Look through a group of files looking for a specific cookie, when we find it return, which means the first one found Finds the ASPN Cookie from IE cookie files # just print a debug # Found the key, now find the files and look through them # print findIECookie(domain='kuro5hin', cookie='k5-new_session') | 2.774101 | 3 |
api_handler/xml_soccer.py | clwatkins/fpl_fun | 0 | 6633106 | """Reconstructs a football season fixture by fixture, outputting the table as it stood for every event.
Uses the football data API to provide raw match data.
Documentation here: http://www.xmlsoccer.com/FootballData.asmx
Key values:
EPL:
UEFA Champions League:
FA Cup:
FA Community Shield:
Get competitions: http://api.football-data.org/v2/competitions/
Get competition seasons: http://api.football-data.org/v2/competitions/2021
Get season info:
"""
import requests as rq
import pandas as pd
import os
from lxml import etree
import typing
class XmlSoccerRequest(object):
def __init__(self):
self.api_key = os.environ['XML_SOCCER_API_KEY']
self.api_url = 'http://www.xmlsoccer.com/FootballData.asmx/'
self.base_params = {'ApiKey': self.api_key}
def get(self, method:str='GetAllLeagues', **kwargs) -> typing.List[typing.Dict]:
r = rq.get(self.api_url + method,
params={**self.base_params, **kwargs})
if r.status_code != 200:
raise ConnectionError(r.text)
else:
return self._process_xml(r)
@staticmethod
def _process_xml(response: rq.Response) -> typing.List[typing.Dict]:
data = []
try:
root = etree.XML(response.text.encode('utf-8'))
except SyntaxError:
raise SyntaxError(response.text)
if len(root) == 0:
raise (ConnectionError(root.text))
for child in list(root):
tmp = dict()
for element in list(child):
tmp[element.tag] = element.text
data.append(tmp)
return data
def get_season_matches(league_code: str, season_date_string: str):
"""Downloads matches from a particular competition and season into a dataframe.
# Common competition codes
E.g. for the Premier League 17-18 season I would use:
competition_code=2021
season_name='1718'
:param league_code: int
:param season_date_string: str
:return: df containing all of the competition season match information
"""
season_matches_raw = XmlSoccerRequest().get('GetFixturesByLeagueAndSeason',
league=league_code,
seasonDateString=season_date_string)
# season_detail_df = pd.DataFrame.from_records(season_matches_raw)
# season_detail_df['MatchDate'] = pd.to_datetime(season_detail_df.Date)
# season_detail_df['CompetitionSeason'] = season_date_string
# season_detail_df['CompetitionName'] = season_matches_raw[0]['League']
# return season_matches_raw, season_detail_df
return season_matches_raw
def process_season_matches(season_detail_df: pd.DataFrame):
"""Processes raw season match data into parsable match and table data.
    :param season_detail_df: dataframe built from the records returned by get_season_matches
    :return: 2 dataframes: matches_df with one row per team per match, and table_df with the
    cumulative league table (points, goal difference and per-matchday rank) through the season
"""
def create_table_records(row):
# TODO handle null values better (don't contaminate df)
home_row_data = dict()
home_row_data['TeamName'] = row.HomeTeam
home_row_data['HomeOrAway'] = 'Home'
home_row_data['MatchOpponent'] = row.AwayTeam
home_row_data['GoalsFor'] = float(row.HomeGoals)
home_row_data['GoalsAgainst'] = float(row.AwayGoals)
home_row_data['MatchDay'] = float(row.Round)
home_row_data['MatchId'] = row.Id
home_row_data['GoalDiff'] = float(row.HomeGoals) - float(row.AwayGoals)
home_row_data['GamesPlayed'] = 1
home_row_data['CompetitionSeason'] = row.CompetitionSeason
home_row_data['CompetitionName'] = row.CompetitionName
home_row_data['MatchDate'] = row.MatchDate
if home_row_data['GoalDiff'] > 0:
points = 3
home_row_data['GamesWon'] = 1
elif home_row_data['GoalDiff'] == 0:
points = 1
home_row_data['GamesDrawn'] = 1
else:
points = 0
home_row_data['GamesLost'] = 1
home_row_data['Points'] = points
# repeat for away team
away_row_data = dict()
away_row_data['TeamName'] = row.AwayTeam
away_row_data['HomeOrAway'] = 'Away'
away_row_data['MatchOpponent'] = row.HomeTeam
away_row_data['GoalsFor'] = float(row.AwayGoals)
away_row_data['GoalsAgainst'] = float(row.HomeGoals)
away_row_data['MatchDay'] = float(row.Round)
away_row_data['MatchId'] = row.Id
away_row_data['GoalDiff'] = float(row.AwayGoals) - float(row.HomeGoals)
away_row_data['GamesPlayed'] = 1
away_row_data['CompetitionSeason'] = row.CompetitionSeason
away_row_data['CompetitionName'] = row.CompetitionName
away_row_data['MatchDate'] = row.MatchDate
if away_row_data['GoalDiff'] > 0:
points = 3
away_row_data['GamesWon'] = 1
elif away_row_data['GoalDiff'] == 0:
points = 1
away_row_data['GamesDrawn'] = 1
else:
points = 0
away_row_data['GamesLost'] = 1
away_row_data['Points'] = points
return [home_row_data, away_row_data]
season_dropped_df = season_detail_df.dropna(thresh=10) # drop only records that are substantively blank
matches_records = season_dropped_df.apply(create_table_records, axis=1)
table_df_flat_list = [l for sublist in matches_records for l in sublist]
matches_df = pd.DataFrame.from_records(table_df_flat_list)
table_df = matches_df.groupby(['MatchDay', 'TeamName']).sum().groupby('TeamName').cumsum()\
.sort_values(by=['MatchDay', 'Points', 'GoalDiff'])
table_df = table_df.join(table_df.groupby('MatchDay').rank('average'), rsuffix='_Rank') # add relative ranking
return matches_df, table_df
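
# --- Added usage sketch (hedged) ---
# A minimal end-to-end example of chaining the two functions above. It assumes
# XML_SOCCER_API_KEY is set and that '3' is a valid xmlsoccer league id for the
# Premier League -- both assumptions. The dataframe-building lines simply mirror
# the commented-out code inside get_season_matches.
if __name__ == '__main__':
    raw_matches = get_season_matches(league_code='3', season_date_string='1718')
    season_detail_df = pd.DataFrame.from_records(raw_matches)
    season_detail_df['MatchDate'] = pd.to_datetime(season_detail_df.Date)
    season_detail_df['CompetitionSeason'] = '1718'
    season_detail_df['CompetitionName'] = raw_matches[0]['League']
    matches_df, table_df = process_season_matches(season_detail_df)
    print(table_df.tail())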
| """Reconstructs a football season fixture by fixture, outputting the table as it stood for every event.
Uses the football data API to provide raw match data.
Documentation here: http://www.xmlsoccer.com/FootballData.asmx
Key values:
EPL:
UEFA Champions League:
FA Cup:
FA Community Shield:
Get competitions: http://api.football-data.org/v2/competitions/
Get competition seasons: http://api.football-data.org/v2/competitions/2021
Get season info:
"""
import requests as rq
import pandas as pd
import os
from lxml import etree
import typing
class XmlSoccerRequest(object):
def __init__(self):
self.api_key = os.environ['XML_SOCCER_API_KEY']
self.api_url = 'http://www.xmlsoccer.com/FootballData.asmx/'
self.base_params = {'ApiKey': self.api_key}
def get(self, method:str='GetAllLeagues', **kwargs) -> typing.List[typing.Dict]:
r = rq.get(self.api_url + method,
params={**self.base_params, **kwargs})
if r.status_code != 200:
raise ConnectionError(r.text)
else:
return self._process_xml(r)
@staticmethod
def _process_xml(response: rq.Response) -> typing.List[typing.Dict]:
data = []
try:
root = etree.XML(response.text.encode('utf-8'))
except SyntaxError:
raise SyntaxError(response.text)
if len(root) == 0:
raise (ConnectionError(root.text))
for child in list(root):
tmp = dict()
for element in list(child):
tmp[element.tag] = element.text
data.append(tmp)
return data
def get_season_matches(league_code: str, season_date_string: str):
"""Downloads matches from a particular competition and season into a dataframe.
# Common competition codes
E.g. for the Premier League 17-18 season I would use:
competition_code=2021
season_name='1718'
:param league_code: int
:param season_date_string: str
:return: df containing all of the competition season match information
"""
season_matches_raw = XmlSoccerRequest().get('GetFixturesByLeagueAndSeason',
league=league_code,
seasonDateString=season_date_string)
# season_detail_df = pd.DataFrame.from_records(season_matches_raw)
# season_detail_df['MatchDate'] = pd.to_datetime(season_detail_df.Date)
# season_detail_df['CompetitionSeason'] = season_date_string
# season_detail_df['CompetitionName'] = season_matches_raw[0]['League']
# return season_matches_raw, season_detail_df
return season_matches_raw
def process_season_matches(season_detail_df: pd.DataFrame):
"""Processes raw season match data into parsable match and table data.
    :param season_detail_df: dataframe built from the records returned by get_season_matches
    :return: 2 dataframes: matches_df with one row per team per match, and table_df with the
    cumulative league table (points, goal difference and per-matchday rank) through the season
"""
def create_table_records(row):
# TODO handle null values better (don't contaminate df)
home_row_data = dict()
home_row_data['TeamName'] = row.HomeTeam
home_row_data['HomeOrAway'] = 'Home'
home_row_data['MatchOpponent'] = row.AwayTeam
home_row_data['GoalsFor'] = float(row.HomeGoals)
home_row_data['GoalsAgainst'] = float(row.AwayGoals)
home_row_data['MatchDay'] = float(row.Round)
home_row_data['MatchId'] = row.Id
home_row_data['GoalDiff'] = float(row.HomeGoals) - float(row.AwayGoals)
home_row_data['GamesPlayed'] = 1
home_row_data['CompetitionSeason'] = row.CompetitionSeason
home_row_data['CompetitionName'] = row.CompetitionName
home_row_data['MatchDate'] = row.MatchDate
if home_row_data['GoalDiff'] > 0:
points = 3
home_row_data['GamesWon'] = 1
elif home_row_data['GoalDiff'] == 0:
points = 1
home_row_data['GamesDrawn'] = 1
else:
points = 0
home_row_data['GamesLost'] = 1
home_row_data['Points'] = points
# repeat for away team
away_row_data = dict()
away_row_data['TeamName'] = row.AwayTeam
away_row_data['HomeOrAway'] = 'Away'
away_row_data['MatchOpponent'] = row.HomeTeam
away_row_data['GoalsFor'] = float(row.AwayGoals)
away_row_data['GoalsAgainst'] = float(row.HomeGoals)
away_row_data['MatchDay'] = float(row.Round)
away_row_data['MatchId'] = row.Id
away_row_data['GoalDiff'] = float(row.AwayGoals) - float(row.HomeGoals)
away_row_data['GamesPlayed'] = 1
away_row_data['CompetitionSeason'] = row.CompetitionSeason
away_row_data['CompetitionName'] = row.CompetitionName
away_row_data['MatchDate'] = row.MatchDate
if away_row_data['GoalDiff'] > 0:
points = 3
away_row_data['GamesWon'] = 1
elif away_row_data['GoalDiff'] == 0:
points = 1
away_row_data['GamesDrawn'] = 1
else:
points = 0
away_row_data['GamesLost'] = 1
away_row_data['Points'] = points
return [home_row_data, away_row_data]
season_dropped_df = season_detail_df.dropna(thresh=10) # drop only records that are substantively blank
matches_records = season_dropped_df.apply(create_table_records, axis=1)
table_df_flat_list = [l for sublist in matches_records for l in sublist]
matches_df = pd.DataFrame.from_records(table_df_flat_list)
table_df = matches_df.groupby(['MatchDay', 'TeamName']).sum().groupby('TeamName').cumsum()\
.sort_values(by=['MatchDay', 'Points', 'GoalDiff'])
table_df = table_df.join(table_df.groupby('MatchDay').rank('average'), rsuffix='_Rank') # add relative ranking
return matches_df, table_df
| en | 0.813287 | Reconstructs a football season fixture by fixture, outputting the table as it stood for every event. Uses the football data API to provide raw match data. Documentation here: http://www.xmlsoccer.com/FootballData.asmx Key values: EPL: UEFA Champions League: FA Cup: FA Community Shield: Get competitions: http://api.football-data.org/v2/competitions/ Get competition seasons: http://api.football-data.org/v2/competitions/2021 Get season info: Downloads matches from a particular competition and season into a dataframe. # Common competition codes E.g. for the Premier League 17-18 season I would use: competition_code=2021 season_name='1718' :param league_code: int :param season_date_string: str :return: df containing all of the competition season match information # season_detail_df = pd.DataFrame.from_records(season_matches_raw) # season_detail_df['MatchDate'] = pd.to_datetime(season_detail_df.Date) # season_detail_df['CompetitionSeason'] = season_date_string # season_detail_df['CompetitionName'] = season_matches_raw[0]['League'] # return season_matches_raw, season_detail_df Processes raw season match data into parsable match and table data. :param season_detail_df: Dataframe as returned by get_season_matches function :return: 3 dataframes: expanded_df with match info, table_df with match outcome info, and grouped_table_df with a view of the league table week on week through the season # TODO handle null values better (don't contaminate df) # repeat for away team # drop only records that are substantively blank # add relative ranking | 3.4146 | 3 |
kwargs.py | taccisum/py_base_learn | 0 | 6633107 | <reponame>taccisum/py_base_learn<gh_stars>0
#coding=utf-8
print '使用kwargs定义一个字典初始化函数'
def dict_init(**kwargs):
return kwargs
print '初始化一个字典', dict_init(a=1, b=2, c='3')
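# Added note (hedged): **kwargs collects only keyword arguments, so calling the same
# function with none of them simply returns an empty dict:
# print dict_init()  # -> {}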
| #coding=utf-8
print '使用kwargs定义一个字典初始化函数'
def dict_init(**kwargs):
return kwargs
print '初始化一个字典', dict_init(a=1, b=2, c='3') | en | 0.655248 | #coding=utf-8 | 2.708512 | 3 |
gtk3/intro/intro-widget.py | RobertoRosa7/python | 0 | 6633108 | <filename>gtk3/intro/intro-widget.py<gh_stars>0
# -*- coding: utf-8 -*-
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
default_size = {'width': 320, 'height': 568}
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Window")
self.set_default_size(default_size['width'], default_size['height'])
label = Gtk.Label()
label.set_label("This is a Basic Window")
# # label.set_angle(50)
# # label.set_halign(Gtk.Align.START) # print(dir(Gtk.Align))
self.add(label)
class AboutDialog(Gtk.AboutDialog):
def __init__(self):
Gtk.AboutDialog.__init__(self)
# self.set_default_size(default_size['width'], default_size['height'])
class AccelLabel(Gtk.AccelLabel):
def __init__(self):
Gtk.AccelLabel.__init__(self)
class ActionBar(Gtk.ActionBar):
def __init__(self):
Gtk.ActionBar.__init__(self)
class AppChooserButton(Gtk.AppChooserButton):
def __init__(self):
Gtk.AppChooserButton.__init__(self)
var = Window()
# var = AboutDialog()
# var = AppChooserButton()
var.connect("destroy", Gtk.main_quit)
var.show_all()
Gtk.main() | <filename>gtk3/intro/intro-widget.py<gh_stars>0
# -*- coding: utf-8 -*-
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
default_size = {'width': 320, 'height': 568}
class Window(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Window")
self.set_default_size(default_size['width'], default_size['height'])
label = Gtk.Label()
label.set_label("This is a Basic Window")
# # label.set_angle(50)
# # label.set_halign(Gtk.Align.START) # print(dir(Gtk.Align))
self.add(label)
class AboutDialog(Gtk.AboutDialog):
def __init__(self):
Gtk.AboutDialog.__init__(self)
# self.set_default_size(default_size['width'], default_size['height'])
class AccelLabel(Gtk.AccelLabel):
def __init__(self):
Gtk.AccelLabel.__init__(self)
class ActionBar(Gtk.ActionBar):
def __init__(self):
Gtk.ActionBar.__init__(self)
class AppChooserButton(Gtk.AppChooserButton):
def __init__(self):
Gtk.AppChooserButton.__init__(self)
var = Window()
# var = AboutDialog()
# var = AppChooserButton()
var.connect("destroy", Gtk.main_quit)
var.show_all()
Gtk.main() | en | 0.243742 | # -*- coding: utf-8 -*- # # label.set_angle(50) # # label.set_halign(Gtk.Align.START) # print(dir(Gtk.Align)) # self.set_default_size(default_size['width'], default_size['height']) # var = AboutDialog() # var = AppChooserButton() | 2.772689 | 3 |
ingest-data-progs/ingest_cabi_data.py | georgetown-analytics/DC-Criminalistics | 0 | 6633109 | <reponame>georgetown-analytics/DC-Criminalistics
'''
Import Libraries
'''
import json
import pandas as pd
import requests
import sqlite3
'''
Pull CABI station locations from https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json
'''
def cabiWebPull():
url = 'https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json'
response = requests.get(url)
data = response.json()
cabi_data_df = pd.io.json.json_normalize(data['data']['stations'])
return cabi_data_df
"""
Write to Database Table
"""
def writeDatabaseFile(dataframe):
#Connect to DB table.
conn = sqlite3.connect('../data/cabi-station-data/cabi_station_data.db')
c = conn.cursor()
#Drop table if exists.
c.execute("drop table if exists cabi_station_data")
dataframe = dataframe.drop(columns=['eightd_station_services','eightd_has_key_dispenser', 'rental_methods'])
dataframe.to_sql('cabi_station_data',conn)
#Commit and close connection.
conn.commit()
conn.close()
"""
The driver function.
"""
def main():
cabi_locations = cabiWebPull()
writeDatabaseFile(cabi_locations)
if __name__ == "__main__":
main()
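# --- Added sketch (hedged): quick way to inspect the table written above ---
# Assumes the script has already been run so the SQLite file exists; the selected
# columns (name, lat, lon) come from the GBFS station_information feed and are an
# assumption here rather than something this script guarantees.
# conn = sqlite3.connect('../data/cabi-station-data/cabi_station_data.db')
# print(pd.read_sql_query('SELECT name, lat, lon FROM cabi_station_data LIMIT 5', conn))
# conn.close()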
| '''
Import Libraries
'''
import json
import pandas as pd
import requests
import sqlite3
'''
Pull CABI station locations from https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json
'''
def cabiWebPull():
url = 'https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json'
response = requests.get(url)
data = response.json()
cabi_data_df = pd.io.json.json_normalize(data['data']['stations'])
return cabi_data_df
"""
Write to Database Table
"""
def writeDatabaseFile(dataframe):
#Connect to DB table.
conn = sqlite3.connect('../data/cabi-station-data/cabi_station_data.db')
c = conn.cursor()
#Drop table if exists.
c.execute("drop table if exists cabi_station_data")
dataframe = dataframe.drop(columns=['eightd_station_services','eightd_has_key_dispenser', 'rental_methods'])
dataframe.to_sql('cabi_station_data',conn)
#Commit and close connection.
conn.commit()
conn.close()
"""
The driver function.
"""
def main():
cabi_locations = cabiWebPull()
writeDatabaseFile(cabi_locations)
if __name__ == "__main__":
main() | en | 0.829162 | Import Libraries Pull CABI station locations from https://gbfs.capitalbikeshare.com/gbfs/en/station_information.json Write to Database Table #Connect to DB table. #Drop table if exists. #Commit and close connection. The driver function. | 3.509795 | 4 |
aquecimento/python/B.py | Joao-Norberto/SBC-XXV-maratona-de-progrmacao | 0 | 6633110 | #retorna o fatorial de um número
def fatorial(n):
fat = 1
for i in range(n, 1, -1):
fat = fat * i
return fat
#retorna o fatorial mais próximo de um número
def fatorial_min(n):
X = int(1)
    while fatorial(X) <= n:  # fix: compare against the parameter n, not the global N
X = X + 1
return X - 1
N = int(input())
qtd_fatoriais = int(0)
while N > 0:
N = N - fatorial(fatorial_min(N))
qtd_fatoriais = qtd_fatoriais + 1
print(qtd_fatoriais)
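# Added worked example (hedged): greedy decomposition into factorials, e.g. N = 10:
#   10 - 3! = 4,  4 - 2! = 2,  2 - 2! = 0  ->  prints 3  (10 = 3! + 2! + 2!)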
| #retorna o fatorial de um número
def fatorial(n):
fat = 1
for i in range(n, 1, -1):
fat = fat * i
return fat
#retorna o fatorial mais próximo de um número
def fatorial_min(n):
X = int(1)
    while fatorial(X) <= n:  # fix: compare against the parameter n, not the global N
X = X + 1
return X - 1
N = int(input())
qtd_fatoriais = int(0)
while N > 0:
N = N - fatorial(fatorial_min(N))
qtd_fatoriais = qtd_fatoriais + 1
print(qtd_fatoriais)
| pt | 0.92793 | #retorna o fatorial de um número #retorna o fatorial mais próximo de um número | 3.829415 | 4 |
bamboo/common_python/test_tools.py | jychoi-hpc/lbann | 0 | 6633111 | <reponame>jychoi-hpc/lbann
import pytest
import subprocess
import tools
# This test file isn't in a directory to be run from Bamboo
# Run locally with python -m pytest -s
d = dict(
executable='exe',
num_nodes=20,
partition='pdebug',
time_limit=30,
num_processes=40,
dir_name='dir',
data_filedir_default='lscratchh/filedir',
data_reader_name='mnist',
data_reader_percent=0.10,
exit_after_setup=True,
mini_batch_size=15,
model_folder='models/folder',
model_name='lenet',
num_epochs=7,
optimizer_name='adagrad',
processes_per_model=10,
extra_lbann_flags={'block_size': 4, 'print_affinity': None},
output_file_name='output_file',
error_file_name='error_file',
check_executable_existence=False)
def test_command_catalyst():
actual = tools.get_command(cluster='catalyst', **d)
expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_corona():
actual = tools.get_command(cluster='corona', **d)
expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_lassen():
actual = tools.get_command(cluster='lassen', **d)
expected = 'bsub -G guests -Is -q pdebug -nnodes 20 -W 30 jsrun -b "packed:10" -c 40 -g 4 -d packed -n 16 -r 1 -a 4 exe --data_filedir=gpfs1/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_pascal():
actual = tools.get_command(cluster='pascal', **d)
expected = 'salloc --nodes=20 --partition=pbatch --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_ray():
actual = tools.get_command(cluster='ray', **d)
expected = 'bsub -x -G guests -Is -n 40 -q pdebug -R "span[ptile=2]" -W 30 mpirun --timeout=30 -np 40 -N 2 exe --data_filedir=gscratchr/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
# Test error cases ############################################################
def test_blacklisted_substrings_1():
try:
tools.get_command('ray', 'exe', partition=';',
optimizer_path='--model=new_model',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): ; contains ; , --model=new_model contains --'
assert actual == expected
def test_blacklisted_substrings_2():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'--bad_key': 5},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): --bad_key contains --'
assert actual == expected
def test_blacklisted_substrings_3():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'key': '--bad_value'},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): --bad_value contains --'
assert actual == expected
def test_unsupported_cluster():
try:
tools.get_command('q', 'exe', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Unsupported Cluster: q'
assert actual == expected
def test_bad_model_1():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
model_name='name', model_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_2():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
model_path='path', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_3():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_name='name',
model_path='path', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_4():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_folder set but not model_name.'
assert actual == expected
def test_bad_model_5():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_name set but not model_folder.'
assert actual == expected
def test_bad_data_reader():
try:
tools.get_command('catalyst', 'exe', dir_name='dir',
data_reader_name='name', data_reader_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_path is set but so is data_reader_name , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_optimizer():
try:
tools.get_command('ray', 'exe', dir_name='dir', optimizer_name='name',
optimizer_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: optimizer_path is set but so is optimizer_name'
assert actual == expected
def test_bad_dir_name_1():
try:
tools.get_command('ray', 'exe', dir_name='dir',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name set but none of model_folder, model_name, data_reader_name, optimizer_name are.'
assert actual == expected
def test_bad_dir_name_2():
try:
tools.get_command('ray', 'exe', model_folder='folder',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_dir_name_3():
try:
tools.get_command('ray', 'exe', model_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_dir_name_4():
try:
tools.get_command('catalyst', 'exe', data_reader_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is. , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_dir_name_5():
try:
tools.get_command('ray', 'exe', optimizer_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_data_filedir_1():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filedir_train_default='a',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_2():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filename_train_default='b',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_3():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filedir_test_default='c',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_4():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filename_test_default='d',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_5():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filedir_train_default='e',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_6():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filename_train_default='f',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_7():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filedir_test_default='g',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_8():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filename_test_default='h',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_9():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_data_filedir_10():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_data_filedir_11():
try:
tools.get_command('ray', 'exe', data_filedir_default='filedir',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_filedir_default set but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_12():
try:
tools.get_command('ray', 'exe', data_filedir_train_default='a',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_13():
try:
tools.get_command('ray', 'exe', data_filename_train_default='b',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_14():
try:
tools.get_command('ray', 'exe', data_filedir_test_default='c',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_15():
try:
tools.get_command('ray', 'exe', data_filename_test_default='e',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_extra_lbann_flags_invalid_flag():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'invalid_flag': 'value'},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = ("Invalid Usage: extra_lbann_flags includes invalid"
" flag=invalid_flag. Flags must"
" be in ['block_size', 'procs_per_trainer', 'num_gpus',"
" 'num_parallel_readers', 'num_io_threads', 'serialize_io',"
" 'disable_background_io_activity', 'disable_cuda',"
" 'random_seed', 'objective_function', 'data_layout',"
" 'print_affinity', 'use_data_store', 'preload_data_store',"
" 'super_node', 'write_sample_list', 'ltfb_verbose',"
" 'index_list_train', 'index_list_test',"
" 'label_filename_train', 'label_filename_test',"
" 'share_testing_data_readers', 'image_dir', 'no_im_comm']."
)
assert actual == expected
def test_bad_extra_lbann_flags_not_a_dict():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags='invalid_flag',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = (
'Invalid Usage: extra_lbann_flags must be a dict e.g. `{flag :'
' None, flag: 4}`. Use `None` if a flag has no value attached '
'to it.')
assert actual == expected
| import pytest
import subprocess
import tools
# This test file isn't in a directory to be run from Bamboo
# Run locally with python -m pytest -s
d = dict(
executable='exe',
num_nodes=20,
partition='pdebug',
time_limit=30,
num_processes=40,
dir_name='dir',
data_filedir_default='lscratchh/filedir',
data_reader_name='mnist',
data_reader_percent=0.10,
exit_after_setup=True,
mini_batch_size=15,
model_folder='models/folder',
model_name='lenet',
num_epochs=7,
optimizer_name='adagrad',
processes_per_model=10,
extra_lbann_flags={'block_size': 4, 'print_affinity': None},
output_file_name='output_file',
error_file_name='error_file',
check_executable_existence=False)
def test_command_catalyst():
actual = tools.get_command(cluster='catalyst', **d)
expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_corona():
actual = tools.get_command(cluster='corona', **d)
expected = 'salloc --nodes=20 --partition=pdebug --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_lassen():
actual = tools.get_command(cluster='lassen', **d)
expected = 'bsub -G guests -Is -q pdebug -nnodes 20 -W 30 jsrun -b "packed:10" -c 40 -g 4 -d packed -n 16 -r 1 -a 4 exe --data_filedir=gpfs1/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_pascal():
actual = tools.get_command(cluster='pascal', **d)
expected = 'salloc --nodes=20 --partition=pbatch --time=30 srun --mpibind=off --time=30 --ntasks=40 exe --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
def test_command_ray():
actual = tools.get_command(cluster='ray', **d)
expected = 'bsub -x -G guests -Is -n 40 -q pdebug -R "span[ptile=2]" -W 30 mpirun --timeout=30 -np 40 -N 2 exe --data_filedir=gscratchr/filedir --reader=dir/model_zoo/data_readers/data_reader_mnist.prototext --data_reader_percent=0.100000 --exit_after_setup --mini_batch_size=15 --model=dir/model_zoo/models/folder/model_lenet.prototext --num_epochs=7 --optimizer=dir/model_zoo/optimizers/opt_adagrad.prototext --procs_per_model=10 --block_size=4 --print_affinity > output_file 2> error_file'
assert actual == expected
# Test error cases ############################################################
def test_blacklisted_substrings_1():
try:
tools.get_command('ray', 'exe', partition=';',
optimizer_path='--model=new_model',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): ; contains ; , --model=new_model contains --'
assert actual == expected
def test_blacklisted_substrings_2():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'--bad_key': 5},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): --bad_key contains --'
assert actual == expected
def test_blacklisted_substrings_3():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'key': '--bad_value'},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid character(s): --bad_value contains --'
assert actual == expected
def test_unsupported_cluster():
try:
tools.get_command('q', 'exe', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Unsupported Cluster: q'
assert actual == expected
def test_bad_model_1():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
model_name='name', model_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_2():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
model_path='path', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_3():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_name='name',
model_path='path', check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_path is set but so is at least one of model folder and model_name'
assert actual == expected
def test_bad_model_4():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_folder='folder',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_folder set but not model_name.'
assert actual == expected
def test_bad_model_5():
try:
tools.get_command('ray', 'exe', dir_name='dir', model_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: model_name set but not model_folder.'
assert actual == expected
def test_bad_data_reader():
try:
tools.get_command('catalyst', 'exe', dir_name='dir',
data_reader_name='name', data_reader_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_path is set but so is data_reader_name , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_optimizer():
try:
tools.get_command('ray', 'exe', dir_name='dir', optimizer_name='name',
optimizer_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: optimizer_path is set but so is optimizer_name'
assert actual == expected
def test_bad_dir_name_1():
try:
tools.get_command('ray', 'exe', dir_name='dir',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name set but none of model_folder, model_name, data_reader_name, optimizer_name are.'
assert actual == expected
def test_bad_dir_name_2():
try:
tools.get_command('ray', 'exe', model_folder='folder',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_dir_name_3():
try:
tools.get_command('ray', 'exe', model_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_dir_name_4():
try:
tools.get_command('catalyst', 'exe', data_reader_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is. , data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_dir_name_5():
try:
tools.get_command('ray', 'exe', optimizer_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: dir_name is not set but at least one of model_folder, model_name, data_reader_name, optimizer_name is.'
assert actual == expected
def test_bad_data_filedir_1():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filedir_train_default='a',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_2():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filename_train_default='b',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_3():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filedir_test_default='c',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_4():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
data_filedir_default='filedir',
data_filename_test_default='d',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_5():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filedir_train_default='e',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_6():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filename_train_default='f',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_7():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filedir_test_default='g',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_8():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
data_filedir_default='filedir',
data_filename_test_default='h',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_fildir_default set but so is at least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default]'
assert actual == expected
def test_bad_data_filedir_9():
try:
tools.get_command('ray', 'exe', dir_name='dir', data_reader_name='name',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_data_filedir_10():
try:
tools.get_command('ray', 'exe', data_reader_path='path',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_reader_name or data_reader_path is set but not data_filedir_default. If a data reader is provided, the default filedir must be set. This allows for determining what the filedir should be on each cluster. Alternatively, some or all of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] can be set.'
assert actual == expected
def test_bad_data_filedir_11():
try:
tools.get_command('ray', 'exe', data_filedir_default='filedir',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: data_filedir_default set but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_12():
try:
tools.get_command('ray', 'exe', data_filedir_train_default='a',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_13():
try:
tools.get_command('ray', 'exe', data_filename_train_default='b',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_14():
try:
tools.get_command('ray', 'exe', data_filedir_test_default='c',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_data_filedir_15():
try:
tools.get_command('ray', 'exe', data_filename_test_default='e',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = 'Invalid Usage: At least one of [data_filedir_train_default, data_filename_train_default, data_filedir_test_default, data_filename_test_default] is set, but neither data_reader_name or data_reader_path are.'
assert actual == expected
def test_bad_extra_lbann_flags_invalid_flag():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags={'invalid_flag': 'value'},
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = ("Invalid Usage: extra_lbann_flags includes invalid"
" flag=invalid_flag. Flags must"
" be in ['block_size', 'procs_per_trainer', 'num_gpus',"
" 'num_parallel_readers', 'num_io_threads', 'serialize_io',"
" 'disable_background_io_activity', 'disable_cuda',"
" 'random_seed', 'objective_function', 'data_layout',"
" 'print_affinity', 'use_data_store', 'preload_data_store',"
" 'super_node', 'write_sample_list', 'ltfb_verbose',"
" 'index_list_train', 'index_list_test',"
" 'label_filename_train', 'label_filename_test',"
" 'share_testing_data_readers', 'image_dir', 'no_im_comm']."
)
assert actual == expected
def test_bad_extra_lbann_flags_not_a_dict():
try:
tools.get_command('ray', 'exe', partition='pdebug',
extra_lbann_flags='invalid_flag',
check_executable_existence=False)
assert False
except Exception as e:
actual = str(e)
expected = (
'Invalid Usage: extra_lbann_flags must be a dict e.g. `{flag :'
' None, flag: 4}`. Use `None` if a flag has no value attached '
'to it.')
assert actual == expected | en | 0.378049 | # This test file isn't in a directory to be run from Bamboo # Run locally with python -m pytest -s # Test error cases ############################################################ | 1.765574 | 2 |
index_select_assign/Conditional_Selection_2/Conditional_selection.py | CodeXfull/Pandas | 0 | 6633112 | import pandas as pd
base = pd.read_csv("./index_select_assign/access_data_1/census.csv")
# print(base.marital_status)
print(base.education == ' Bachelors') # True/False para o resultado
#Usando o ==
print(base.loc[base.marital_status == 'Divorced'])
print(base.loc[base.native_country == ' ?'])
#Usando o &
print((base.education == " Bachelors") & (base.age >=45))
# substituindo o loop e usando o |
print(base.loc[(base.age >=45)| (base.relationship == " Husband")])
| import pandas as pd
base = pd.read_csv("./index_select_assign/access_data_1/census.csv")
# print(base.marital_status)
print(base.education == ' Bachelors') # True/False para o resultado
#Usando o ==
print(base.loc[base.marital_status == 'Divorced'])
print(base.loc[base.native_country == ' ?'])
#Usando o &
print((base.education == " Bachelors") & (base.age >=45))
# substituindo o loop e usando o |
print(base.loc[(base.age >=45)| (base.relationship == " Husband")])
| pt | 0.733948 | # print(base.marital_status) # True/False para o resultado #Usando o == #Usando o & # substituindo o loop e usando o | | 3.760998 | 4 |
ingestors/support/pdf.py | simonwoerpel/ingest-file | 23 | 6633113 | <reponame>simonwoerpel/ingest-file<filename>ingestors/support/pdf.py
import uuid
from pdflib import Document
from followthemoney import model
from normality import collapse_spaces # noqa
from ingestors.support.ocr import OCRSupport
from ingestors.support.convert import DocumentConvertSupport
class PDFSupport(DocumentConvertSupport, OCRSupport):
"""Provides helpers for PDF file context extraction."""
def pdf_extract(self, entity, pdf):
"""Extract pages and page text from a PDF file."""
entity.schema = model.get("Pages")
temp_dir = self.make_empty_directory()
for page in pdf:
self.pdf_extract_page(entity, temp_dir, page)
def pdf_alternative_extract(self, entity, pdf_path):
checksum = self.manager.store(pdf_path)
entity.set("pdfHash", checksum)
pdf = Document(bytes(pdf_path))
self.pdf_extract(entity, pdf)
def pdf_extract_page(self, document, temp_dir, page):
"""Extract the contents of a single PDF page, using OCR if need be."""
texts = page.lines
image_path = temp_dir.joinpath(str(uuid.uuid4()))
page.extract_images(path=bytes(image_path), prefix=b"img")
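# Dump any images embedded in the page to the temp dir, then OCR them below and append the recognized text to the page's text lines.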
languages = self.manager.context.get("languages")
for image_file in image_path.glob("*.png"):
with open(image_file, "rb") as fh:
data = fh.read()
text = self.extract_ocr_text(data, languages=languages)
if text is not None:
texts.append(text)
text = " \n".join(texts).strip()
entity = self.manager.make_entity("Page")
entity.make_id(document.id, page.page_no)
entity.set("document", document)
entity.set("index", page.page_no)
entity.add("bodyText", text)
self.manager.apply_context(entity, document)
self.manager.emit_entity(entity)
self.manager.emit_text_fragment(document, text, entity.id)
| import uuid
from pdflib import Document
from followthemoney import model
from normality import collapse_spaces # noqa
from ingestors.support.ocr import OCRSupport
from ingestors.support.convert import DocumentConvertSupport
class PDFSupport(DocumentConvertSupport, OCRSupport):
"""Provides helpers for PDF file context extraction."""
def pdf_extract(self, entity, pdf):
"""Extract pages and page text from a PDF file."""
entity.schema = model.get("Pages")
temp_dir = self.make_empty_directory()
for page in pdf:
self.pdf_extract_page(entity, temp_dir, page)
def pdf_alternative_extract(self, entity, pdf_path):
checksum = self.manager.store(pdf_path)
entity.set("pdfHash", checksum)
pdf = Document(bytes(pdf_path))
self.pdf_extract(entity, pdf)
def pdf_extract_page(self, document, temp_dir, page):
"""Extract the contents of a single PDF page, using OCR if need be."""
texts = page.lines
image_path = temp_dir.joinpath(str(uuid.uuid4()))
page.extract_images(path=bytes(image_path), prefix=b"img")
languages = self.manager.context.get("languages")
for image_file in image_path.glob("*.png"):
with open(image_file, "rb") as fh:
data = fh.read()
text = self.extract_ocr_text(data, languages=languages)
if text is not None:
texts.append(text)
text = " \n".join(texts).strip()
entity = self.manager.make_entity("Page")
entity.make_id(document.id, page.page_no)
entity.set("document", document)
entity.set("index", page.page_no)
entity.add("bodyText", text)
self.manager.apply_context(entity, document)
self.manager.emit_entity(entity)
self.manager.emit_text_fragment(document, text, entity.id) | en | 0.623401 | # noqa Provides helpers for PDF file context extraction. Extract pages and page text from a PDF file. Extract the contents of a single PDF page, using OCR if need be. | 2.677114 | 3 |
gym_gridverse/representations/state_representations.py | DavidSlayback/gym-gridverse | 6 | 6633114 | from typing import Dict
import numpy as np
from gym_gridverse.debugging import gv_debug
from gym_gridverse.representations.representation import (
StateRepresentation,
default_convert,
default_representation_space,
no_overlap_convert,
no_overlap_representation_space,
)
from gym_gridverse.representations.spaces import Space
from gym_gridverse.spaces import StateSpace
from gym_gridverse.state import State
class DefaultStateRepresentation(StateRepresentation):
"""The default representation for state
Simply returns the state as indices. See
:func:`gym_gridverse.representations.representation.default_representation_space`
and :func:`gym_gridverse.representations.representation.default_convert`
for more information
"""
@property
def space(self) -> Dict[str, Space]:
max_type_index = self.state_space.max_agent_object_type
max_state_index = self.state_space.max_grid_object_status
max_color_value = self.state_space.max_object_color
return default_representation_space(
max_type_index,
max_state_index,
max_color_value,
self.state_space.grid_shape.width,
self.state_space.grid_shape.height,
)
def convert(self, s: State) -> Dict[str, np.ndarray]:
if gv_debug() and not self.state_space.contains(s):
raise ValueError('state_space does not contain state')
return default_convert(s.grid, s.agent)
class NoOverlapStateRepresentation(StateRepresentation):
"""Representation that ensures that the numbers represent unique things
Simply returns the state as indices, except that channels do not
overlap. See
`gym_gridverse.representations.representation.no_overlap_representation_space`
and `gym_gridverse.representations.representation.no_overlap_convert` for
more information
"""
@property
def space(self) -> Dict[str, Space]:
max_type_index = self.state_space.max_grid_object_type
max_state_index = self.state_space.max_grid_object_status
max_color_value = self.state_space.max_object_color
return no_overlap_representation_space(
max_type_index,
max_state_index,
max_color_value,
self.state_space.grid_shape.width,
self.state_space.grid_shape.height,
)
def convert(self, s: State) -> Dict[str, np.ndarray]:
if gv_debug() and not self.state_space.contains(s):
raise ValueError('state_space does not contain state')
max_type_index = self.state_space.max_grid_object_type
max_state_index = self.state_space.max_grid_object_status
return no_overlap_convert(
s.grid, s.agent, max_type_index, max_state_index
)
class CompactStateRepresentation(StateRepresentation):
"""Returns state as indices but 'not sparse'
Will jump over unused indices to allow for smaller spaces
TODO: implement
"""
def __init__(self, state_space: StateSpace):
super().__init__(state_space)
raise NotImplementedError
def create_state_representation(
name: str, state_space: StateSpace
) -> StateRepresentation:
"""Factory function for state representations
Returns:
Representation:
"""
# TODO: test
if name == 'default':
return DefaultStateRepresentation(state_space)
if name == 'no_overlap':
return NoOverlapStateRepresentation(state_space)
if name == 'compact':
raise NotImplementedError
raise ValueError(f'invalid name {name}')
| from typing import Dict
import numpy as np
from gym_gridverse.debugging import gv_debug
from gym_gridverse.representations.representation import (
StateRepresentation,
default_convert,
default_representation_space,
no_overlap_convert,
no_overlap_representation_space,
)
from gym_gridverse.representations.spaces import Space
from gym_gridverse.spaces import StateSpace
from gym_gridverse.state import State
class DefaultStateRepresentation(StateRepresentation):
"""The default representation for state
Simply returns the state as indices. See
:func:`gym_gridverse.representations.representation.default_representation_space`
and :func:`gym_gridverse.representations.representation.default_convert`
for more information
"""
@property
def space(self) -> Dict[str, Space]:
max_type_index = self.state_space.max_agent_object_type
max_state_index = self.state_space.max_grid_object_status
max_color_value = self.state_space.max_object_color
return default_representation_space(
max_type_index,
max_state_index,
max_color_value,
self.state_space.grid_shape.width,
self.state_space.grid_shape.height,
)
def convert(self, s: State) -> Dict[str, np.ndarray]:
if gv_debug() and not self.state_space.contains(s):
raise ValueError('state_space does not contain state')
return default_convert(s.grid, s.agent)
class NoOverlapStateRepresentation(StateRepresentation):
"""Representation that ensures that the numbers represent unique things
Simply returns the state as indices, except that channels do not
overlap. See
`gym_gridverse.representations.representation.no_overlap_representation_space`
and `gym_gridverse.representations.representation.no_overlap_convert` for
more information
"""
@property
def space(self) -> Dict[str, Space]:
max_type_index = self.state_space.max_grid_object_type
max_state_index = self.state_space.max_grid_object_status
max_color_value = self.state_space.max_object_color
return no_overlap_representation_space(
max_type_index,
max_state_index,
max_color_value,
self.state_space.grid_shape.width,
self.state_space.grid_shape.height,
)
def convert(self, s: State) -> Dict[str, np.ndarray]:
if gv_debug() and not self.state_space.contains(s):
raise ValueError('state_space does not contain state')
max_type_index = self.state_space.max_grid_object_type
max_state_index = self.state_space.max_grid_object_status
return no_overlap_convert(
s.grid, s.agent, max_type_index, max_state_index
)
class CompactStateRepresentation(StateRepresentation):
"""Returns state as indices but 'not sparse'
Will jump over unused indices to allow for smaller spaces
TODO: implement
"""
def __init__(self, state_space: StateSpace):
super().__init__(state_space)
raise NotImplementedError
def create_state_representation(
name: str, state_space: StateSpace
) -> StateRepresentation:
"""Factory function for state representations
Returns:
Representation:
"""
# TODO: test
if name == 'default':
return DefaultStateRepresentation(state_space)
if name == 'no_overlap':
return NoOverlapStateRepresentation(state_space)
if name == 'compact':
raise NotImplementedError
raise ValueError(f'invalid name {name}')
| en | 0.718417 | The default representation for state Simply returns the state as indices. See :func:`gym_gridverse.representations.representation.default_representation_space` and :func:`gym_gridverse.representations.representation.default_convert` for more information Representation that ensures that the numbers represent unique things Simply returns the state as indices, except that channels do not overlap. See `gym_gridverse.representations.representation.no_overlap_representation_space` and `gym_gridverse.representations.representation.no_overlap_convert` for more information Returns state as indices but 'not sparse' Will jump over unused indices to allow for smaller spaces TODO: implement Factory function for state representations Returns: Representation: # TODO: test | 2.569149 | 3 |
src/collectors/ip/test/testip.py | hermdog/Diamond | 1,795 | 6633115 | #!/usr/bin/env python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ip import IPCollector
###############################################################################
class TestIPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('IPCollector', {
'allowed_names': allowed_names,
'interval': 1,
})
self.collector = IPCollector(config, None)
def test_import(self):
self.assertTrue(IPCollector)
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_open_proc_net_snmp(self, publish_mock, open_mock):
IPCollector.PROC = ['/proc/net/snmp']
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/snmp')
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_work_with_synthetic_data(self, publish_mock, open_mock):
IPCollector.PROC = ['/proc/net/snmp']
self.setUp(['A', 'C'])
open_mock.return_value = StringIO('''
Ip: A B C
Ip: 0 0 0
'''.strip())
self.collector.collect()
open_mock.return_value = StringIO('''
Ip: A B C
Ip: 0 1 2
'''.strip())
publish_mock.call_args_list = []
self.collector.collect()
self.assertEqual(len(publish_mock.call_args_list), 2)
metrics = {
'A': 0,
'C': 2,
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_real_data(self, publish_mock):
self.setUp(['InDiscards', 'InReceives', 'OutDiscards', 'OutRequests'])
IPCollector.PROC = [self.getFixturePath('proc_net_snmp_1')]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
IPCollector.PROC = [self.getFixturePath('proc_net_snmp_2')]
self.collector.collect()
metrics = {
'InDiscards': 0,
'InReceives': 2,
'OutDiscards': 0,
'OutRequests': 1,
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_all_data(self, publish_mock):
metrics = {
'Forwarding': 2,
'DefaultTTL': 64,
'InReceives': 2,
'InHdrErrors': 0,
'InAddrErrors': 0,
'ForwDatagrams': 0,
'InUnknownProtos': 0,
'InDiscards': 0,
'InDelivers': 2,
'OutRequests': 1,
'OutDiscards': 0,
'OutNoRoutes': 0,
'ReasmTimeout': 0,
'ReasmReqds': 0,
'ReasmOKs': 0,
'ReasmFails': 0,
'FragOKs': 0,
'FragFails': 0,
'FragCreates': 0,
}
self.setUp(allowed_names=metrics.keys())
IPCollector.PROC = [
self.getFixturePath('proc_net_snmp_1'),
]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
IPCollector.PROC = [
self.getFixturePath('proc_net_snmp_2'),
]
self.collector.collect()
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from ip import IPCollector
###############################################################################
class TestIPCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('IPCollector', {
'allowed_names': allowed_names,
'interval': 1,
})
self.collector = IPCollector(config, None)
def test_import(self):
self.assertTrue(IPCollector)
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_open_proc_net_snmp(self, publish_mock, open_mock):
IPCollector.PROC = ['/proc/net/snmp']
open_mock.return_value = StringIO('')
self.collector.collect()
open_mock.assert_called_once_with('/proc/net/snmp')
@patch('os.access', Mock(return_value=True))
@patch('__builtin__.open')
@patch('diamond.collector.Collector.publish')
def test_should_work_with_synthetic_data(self, publish_mock, open_mock):
IPCollector.PROC = ['/proc/net/snmp']
self.setUp(['A', 'C'])
open_mock.return_value = StringIO('''
Ip: A B C
Ip: 0 0 0
'''.strip())
self.collector.collect()
open_mock.return_value = StringIO('''
Ip: A B C
Ip: 0 1 2
'''.strip())
publish_mock.call_args_list = []
self.collector.collect()
self.assertEqual(len(publish_mock.call_args_list), 2)
metrics = {
'A': 0,
'C': 2,
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_real_data(self, publish_mock):
self.setUp(['InDiscards', 'InReceives', 'OutDiscards', 'OutRequests'])
IPCollector.PROC = [self.getFixturePath('proc_net_snmp_1')]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
IPCollector.PROC = [self.getFixturePath('proc_net_snmp_2')]
self.collector.collect()
metrics = {
'InDiscards': 0,
'InReceives': 2,
'OutDiscards': 0,
'OutRequests': 1,
}
self.assertPublishedMany(publish_mock, metrics)
@patch('diamond.collector.Collector.publish')
def test_should_work_with_all_data(self, publish_mock):
metrics = {
'Forwarding': 2,
'DefaultTTL': 64,
'InReceives': 2,
'InHdrErrors': 0,
'InAddrErrors': 0,
'ForwDatagrams': 0,
'InUnknownProtos': 0,
'InDiscards': 0,
'InDelivers': 2,
'OutRequests': 1,
'OutDiscards': 0,
'OutNoRoutes': 0,
'ReasmTimeout': 0,
'ReasmReqds': 0,
'ReasmOKs': 0,
'ReasmFails': 0,
'FragOKs': 0,
'FragFails': 0,
'FragCreates': 0,
}
self.setUp(allowed_names=metrics.keys())
IPCollector.PROC = [
self.getFixturePath('proc_net_snmp_1'),
]
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
IPCollector.PROC = [
self.getFixturePath('proc_net_snmp_2'),
]
self.collector.collect()
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == '__main__':
unittest.main()
| de | 0.78886 | #!/usr/bin/env python # coding=utf-8 ############################################################################### ############################################################################### Ip: A B C Ip: 0 0 0 Ip: A B C Ip: 0 1 2 ############################################################################### | 2.119726 | 2 |
utils/misc/current_user.py | DNL-inc/bit | 1 | 6633116 | def get_current_user():
def decorator(func):
setattr(func, 'get_current_user', True)
return func
return decorator | def get_current_user():
def decorator(func):
setattr(func, 'get_current_user', True)
return func
return decorator | none | 1 | 2.694659 | 3 |
|
punky_gibbon/setup.py | SVilgelm/CloudFerry | 6 | 6633117 | # Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from setuptools import setup
setup(name='punky_gibbon',
description='WSGI middleware that generates HTTP errors most of the '
'time, but sometimes passes requests to the next '
'middleware/application',
url='https://github.com/MirantisWorkloadMobility/CloudFerry/tree/devel'
'/punky_gibbon',
author='<NAME>',
author_email='<EMAIL>',
license='Apache',
packages=[
'punky_gibbon',
])
| # Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from setuptools import setup
setup(name='punky_gibbon',
description='WSGI middleware that generates HTTP errors most of the '
'time, but sometimes passes requests to the next '
'middleware/application',
url='https://github.com/MirantisWorkloadMobility/CloudFerry/tree/devel'
'/punky_gibbon',
author='<NAME>',
author_email='<EMAIL>',
license='Apache',
packages=[
'punky_gibbon',
])
| en | 0.836478 | # Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the License); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an AS IS BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and# # limitations under the License. | 1.124595 | 1 |
calculadora.py | matheus770/Calculadora-python | 0 | 6633118 | <reponame>matheus770/Calculadora-python
n1 = float(input("Digite o primeiro numero: "))
n2 = float(input("Digite o segundo numero: "))
op = int(input("Digite 1 - Soma ou 2 - Subtração: "))
if(op == 1):
soma = n1 + n2
print(f"{n1} + {n2} = {soma}")
elif(op == 2):
sub = n1 - n2
print(f"{n1} - {n2} = {sub}")
else:
print("Erro, Digite uma opção valida!")
exit() | n1 = float(input("Digite o primeiro numero: "))
n2 = float(input("Digite o segundo numero: "))
op = int(input("Digite 1 - Soma ou 2 - Subtração: "))
if(op == 1):
soma = n1 + n2
print(f"{n1} + {n2} = {soma}")
elif(op == 2):
sub = n1 - n2
print(f"{n1} - {n2} = {sub}")
else:
print("Erro, Digite uma opção valida!")
exit() | none | 1 | 3.811934 | 4 |
|
src/polytopes/example_curved_polychora.py | timgates42/pywonderland | 0 | 6633119 | # -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Render curved 4d polychoron examples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright (c) 2018 by <NAME>.
"""
import os
import subprocess
from fractions import Fraction
from polytopes.models import Polychora
from polytopes.povray import pov_index_array1d, pov_vector
IMAGE_DIR = "polychora_frames" # directory to save the frames
POV_EXE = "povray" # POV-Ray exe binary
SCENE_FILE = "polychora_curved.pov" # the main scene file
IMAGE_SIZE = 600 # image size in pixels
FRAMES = 1 # number of frames
IMAGE_QUALITY_LEVEL = 11 # between 0-11
SUPER_SAMPLING_LEVEL = 5 # between 1-9
ANTIALIASING_LEVEL = 0.001 # lower for better quality
DATAFILE_NAME = "polychora-data.inc" # export data to this file
data_file = os.path.join(os.getcwd(), "povray", DATAFILE_NAME)
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
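# POV-Ray CLI options assembled below: +I scene file, +W/+H output size, +Q quality, +A antialiasing threshold, +R supersampling depth, +KFI/+KFF first/last frame, -V quiet, +O output path.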
POV_COMMAND = (
" cd povray && "
+ " {} +I{}".format(POV_EXE, SCENE_FILE)
+ " +W{} +H{}".format(IMAGE_SIZE, IMAGE_SIZE)
+ " +Q{}".format(IMAGE_QUALITY_LEVEL)
+ " +A{}".format(ANTIALIASING_LEVEL)
+ " +R{}".format(SUPER_SAMPLING_LEVEL)
+ " +KFI0"
+ " +KFF{}".format(FRAMES - 1)
+ " -V"
+ " +O../{}/".format(IMAGE_DIR)
+ "{}"
)
POV_TEMPLATE = """
#declare bg_color = {};
#declare vertex_size = {};
#declare edge_size = {};
#declare camera_loc = {};
#declare obj_rotation = {};
#declare size_func = {};
#declare face_max= {};
#declare face_min = {};
#declare face_index = {};
#declare use_area_light = {};
// this macro is used for adjusting the size of edges
// according to their positions in the space.
#macro get_size(q)
#local len = vlength(q);
#if (size_func = 0)
#local len = (1.0 + len * len) / 4;
#else #if (size_func = 1)
#local len = 2.0 * log(2.0 + len * len);
#else
#local len = 2.0 * log(1.13 + len * len);
#end
#end
len
#end
#macro choose_face(i, face_size)
#local chosen = false;
#for (ind, 0, dimension_size(face_index, 1) - 1)
#if (i = face_index[ind])
#if (face_size > face_min & face_size < face_max)
#local chosen = true;
#end
#end
#end
chosen
#end
#declare vertices = {};
#declare edges = {};
#declare faces = {};
"""
def draw(
coxeter_diagram,
trunc_type,
extra_relations=(),
description="polychora",
bg_color="SkyBlue",
camera_loc=(0, 0, 30),
rotation=(0, 0, 0),
vertex_size=0.04,
edge_size=0.02,
size_func=0,
face_index=(0,),
face_max=3.0,
face_min=0.5,
use_area_light=False,
):
"""
Export data to povray .inc file and call the rendering process.
:param camera_loc: location of the camera.
:param size_func: choose which sizing funcion to use, currently only 0, 1, 2.
:param face_index: a list controls which types of faces are shown.
:param face_max: only faces smaller than this threshold are shown.
:param face_min: only faces larger than this threshold are shown.
"""
P = Polychora(coxeter_diagram, trunc_type, extra_relations)
P.build_geometry()
vert_data, edge_data, face_data = P.get_povray_data()
with open(data_file, "w") as f:
f.write(
POV_TEMPLATE.format(
bg_color,
vertex_size,
edge_size,
pov_vector(camera_loc),
pov_vector(rotation),
size_func,
face_max,
face_min,
pov_index_array1d(face_index),
int(use_area_light),
vert_data,
edge_data,
face_data,
)
)
print(
"rendering {}: {} vertices, {} edges, {} faces".format(
description, P.num_vertices, P.num_edges, P.num_faces
)
)
process = subprocess.Popen(
POV_COMMAND.format(description),
shell=True,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
_, err = process.communicate()
if process.returncode:
print(type(err), err)
raise IOError("POVRay error: " + err.decode("ascii"))
def main():
draw(
(3, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="5-cell",
camera_loc=(0, 0, 120),
vertex_size=0.08,
rotation=(-30, 60, 0),
edge_size=0.04,
size_func=1,
)
draw(
(4, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="4d-cube",
camera_loc=(0, 0, 130),
vertex_size=0.06,
rotation=(60, 0, 0),
edge_size=0.03,
size_func=1,
face_min=0.2,
face_max=0.8,
)
draw(
(3, 2, 2, 3, 2, 4),
(1, 0, 0, 0),
description="16-cell",
camera_loc=(0, 0, 160),
vertex_size=0.08,
edge_size=0.03,
size_func=2,
face_min=1.0,
face_max=1.2,
)
draw(
(3, 2, 2, 4, 2, 3),
(1, 0, 0, 0),
description="24-cell",
camera_loc=(0, 0, 200),
vertex_size=0.06,
edge_size=0.04,
size_func=2,
face_min=0.2,
face_max=0.8,
)
draw(
(5, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="120-cell",
camera_loc=(0, 0, 400),
vertex_size=0.05,
edge_size=0.025,
size_func=0,
face_min=3.0,
face_max=100.0,
)
draw(
(3, 2, 2, 3, 2, 5),
(1, 0, 0, 0),
description="600-cell",
bg_color="White",
camera_loc=(0, 0, 500),
vertex_size=0.12,
edge_size=0.04,
size_func=2,
face_max=4.0,
face_min=3.0,
)
draw(
(3, 2, 2, 3, 2, 4),
(1, 0, 0, 1),
description="runcinated-16-cell",
bg_color="White",
camera_loc=(0, 0, 450),
vertex_size=0.1,
face_index=(0, 1, 2, 3),
edge_size=0.04,
size_func=1,
face_min=0,
face_max=3,
)
draw(
(5, 2, 2, 3, 2, 3),
(1, 0, 0, 1),
description="runcinated-120-cell",
camera_loc=(0, 0, 360),
vertex_size=0.028,
edge_size=0.014,
face_min=20,
)
# this is the settings I used to render the movie at
# http://pywonderland.com/images/cn/polytopes/rectified-grand-stellated-120-cell.mp4
# (the parameters are not exactly the same but very similar)
# take quite a while to render.
draw(
(Fraction(5, 2), 2, 2, 5, 2, Fraction(5, 2)),
(0, 1, 0, 0),
extra_relations=((0, 1, 2, 1) * 3, (1, 2, 3, 2) * 3),
description="rectified-grand-stellated-120-cell",
size_func=1,
vertex_size=0.06,
edge_size=0.03,
use_area_light=1,
face_max=0.0,
camera_loc=(0, 0, 400),
)
if __name__ == "__main__":
main()
| # -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Render curved 4d polychoron examples
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright (c) 2018 by <NAME>.
"""
import os
import subprocess
from fractions import Fraction
from polytopes.models import Polychora
from polytopes.povray import pov_index_array1d, pov_vector
IMAGE_DIR = "polychora_frames" # directory to save the frames
POV_EXE = "povray" # POV-Ray exe binary
SCENE_FILE = "polychora_curved.pov" # the main scene file
IMAGE_SIZE = 600 # image size in pixels
FRAMES = 1 # number of frames
IMAGE_QUALITY_LEVEL = 11 # between 0-11
SUPER_SAMPLING_LEVEL = 5 # between 1-9
ANTIALIASING_LEVEL = 0.001 # lower for better quality
DATAFILE_NAME = "polychora-data.inc" # export data to this file
data_file = os.path.join(os.getcwd(), "povray", DATAFILE_NAME)
if not os.path.exists(IMAGE_DIR):
os.makedirs(IMAGE_DIR)
POV_COMMAND = (
" cd povray && "
+ " {} +I{}".format(POV_EXE, SCENE_FILE)
+ " +W{} +H{}".format(IMAGE_SIZE, IMAGE_SIZE)
+ " +Q{}".format(IMAGE_QUALITY_LEVEL)
+ " +A{}".format(ANTIALIASING_LEVEL)
+ " +R{}".format(SUPER_SAMPLING_LEVEL)
+ " +KFI0"
+ " +KFF{}".format(FRAMES - 1)
+ " -V"
+ " +O../{}/".format(IMAGE_DIR)
+ "{}"
)
POV_TEMPLATE = """
#declare bg_color = {};
#declare vertex_size = {};
#declare edge_size = {};
#declare camera_loc = {};
#declare obj_rotation = {};
#declare size_func = {};
#declare face_max= {};
#declare face_min = {};
#declare face_index = {};
#declare use_area_light = {};
// this macro is used for adjusting the size of edges
// according to their positions in the space.
#macro get_size(q)
#local len = vlength(q);
#if (size_func = 0)
#local len = (1.0 + len * len) / 4;
#else #if (size_func = 1)
#local len = 2.0 * log(2.0 + len * len);
#else
#local len = 2.0 * log(1.13 + len * len);
#end
#end
len
#end
#macro choose_face(i, face_size)
#local chosen = false;
#for (ind, 0, dimension_size(face_index, 1) - 1)
#if (i = face_index[ind])
#if (face_size > face_min & face_size < face_max)
#local chosen = true;
#end
#end
#end
chosen
#end
#declare vertices = {};
#declare edges = {};
#declare faces = {};
"""
def draw(
coxeter_diagram,
trunc_type,
extra_relations=(),
description="polychora",
bg_color="SkyBlue",
camera_loc=(0, 0, 30),
rotation=(0, 0, 0),
vertex_size=0.04,
edge_size=0.02,
size_func=0,
face_index=(0,),
face_max=3.0,
face_min=0.5,
use_area_light=False,
):
"""
Export data to povray .inc file and call the rendering process.
:param camera_loc: location of the camera.
:param size_func: choose which sizing funcion to use, currently only 0, 1, 2.
:param face_index: a list controls which types of faces are shown.
:param face_max: only faces smaller than this threshold are shown.
:param face_min: only faces larger than this threshold are shown.
"""
P = Polychora(coxeter_diagram, trunc_type, extra_relations)
P.build_geometry()
vert_data, edge_data, face_data = P.get_povray_data()
with open(data_file, "w") as f:
f.write(
POV_TEMPLATE.format(
bg_color,
vertex_size,
edge_size,
pov_vector(camera_loc),
pov_vector(rotation),
size_func,
face_max,
face_min,
pov_index_array1d(face_index),
int(use_area_light),
vert_data,
edge_data,
face_data,
)
)
print(
"rendering {}: {} vertices, {} edges, {} faces".format(
description, P.num_vertices, P.num_edges, P.num_faces
)
)
process = subprocess.Popen(
POV_COMMAND.format(description),
shell=True,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
_, err = process.communicate()
if process.returncode:
print(type(err), err)
raise IOError("POVRay error: " + err.decode("ascii"))
def main():
draw(
(3, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="5-cell",
camera_loc=(0, 0, 120),
vertex_size=0.08,
rotation=(-30, 60, 0),
edge_size=0.04,
size_func=1,
)
draw(
(4, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="4d-cube",
camera_loc=(0, 0, 130),
vertex_size=0.06,
rotation=(60, 0, 0),
edge_size=0.03,
size_func=1,
face_min=0.2,
face_max=0.8,
)
draw(
(3, 2, 2, 3, 2, 4),
(1, 0, 0, 0),
description="16-cell",
camera_loc=(0, 0, 160),
vertex_size=0.08,
edge_size=0.03,
size_func=2,
face_min=1.0,
face_max=1.2,
)
draw(
(3, 2, 2, 4, 2, 3),
(1, 0, 0, 0),
description="24-cell",
camera_loc=(0, 0, 200),
vertex_size=0.06,
edge_size=0.04,
size_func=2,
face_min=0.2,
face_max=0.8,
)
draw(
(5, 2, 2, 3, 2, 3),
(1, 0, 0, 0),
description="120-cell",
camera_loc=(0, 0, 400),
vertex_size=0.05,
edge_size=0.025,
size_func=0,
face_min=3.0,
face_max=100.0,
)
draw(
(3, 2, 2, 3, 2, 5),
(1, 0, 0, 0),
description="600-cell",
bg_color="White",
camera_loc=(0, 0, 500),
vertex_size=0.12,
edge_size=0.04,
size_func=2,
face_max=4.0,
face_min=3.0,
)
draw(
(3, 2, 2, 3, 2, 4),
(1, 0, 0, 1),
description="runcinated-16-cell",
bg_color="White",
camera_loc=(0, 0, 450),
vertex_size=0.1,
face_index=(0, 1, 2, 3),
edge_size=0.04,
size_func=1,
face_min=0,
face_max=3,
)
draw(
(5, 2, 2, 3, 2, 3),
(1, 0, 0, 1),
description="runcinated-120-cell",
camera_loc=(0, 0, 360),
vertex_size=0.028,
edge_size=0.014,
face_min=20,
)
# this is the settings I used to render the movie at
# http://pywonderland.com/images/cn/polytopes/rectified-grand-stellated-120-cell.mp4
# (the parameters are not exactly the same but very similar)
# take quite a while to render.
draw(
(Fraction(5, 2), 2, 2, 5, 2, Fraction(5, 2)),
(0, 1, 0, 0),
extra_relations=((0, 1, 2, 1) * 3, (1, 2, 3, 2) * 3),
description="rectified-grand-stellated-120-cell",
size_func=1,
vertex_size=0.06,
edge_size=0.03,
use_area_light=1,
face_max=0.0,
camera_loc=(0, 0, 400),
)
if __name__ == "__main__":
main()
| en | 0.650177 | # -*- coding: utf-8 -*- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Render curved 4d polychoron examples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright (c) 2018 by <NAME>. # directory to save the frames # POV-Ray exe binary # the main scene file # image size in pixels # number of frames # between 0-11 # between 1-9 # lower for better quality # export data to this file #declare bg_color = {}; #declare vertex_size = {}; #declare edge_size = {}; #declare camera_loc = {}; #declare obj_rotation = {}; #declare size_func = {}; #declare face_max= {}; #declare face_min = {}; #declare face_index = {}; #declare use_area_light = {}; // this macro is used for adjusting the size of edges // according to their positions in the space. #macro get_size(q) #local len = vlength(q); #if (size_func = 0) #local len = (1.0 + len * len) / 4; #else #if (size_func = 1) #local len = 2.0 * log(2.0 + len * len); #else #local len = 2.0 * log(1.13 + len * len); #end #end len #end #macro choose_face(i, face_size) #local chosen = false; #for (ind, 0, dimension_size(face_index, 1) - 1) #if (i = face_index[ind]) #if (face_size > face_min & face_size < face_max) #local chosen = true; #end #end #end chosen #end #declare vertices = {}; #declare edges = {}; #declare faces = {}; Export data to povray .inc file and call the rendering process. :param camera_loc: location of the camera. :param size_func: choose which sizing funcion to use, currently only 0, 1, 2. :param face_index: a list controls which types of faces are shown. :param face_max: only faces smaller than this threshold are shown. :param face_min: only faces larger than this threshold are shown. # this is the settings I used to render the movie at # http://pywonderland.com/images/cn/polytopes/rectified-grand-stellated-120-cell.mp4 # (the parameters are not exactly the same but very similar) # take quite a while to render. | 2.409191 | 2 |
threeML/test/test_fits_file.py | Husky22/threeML | 0 | 6633120 |
from threeML.io.fits_file import FITSExtension, FITSFile
import numpy as np
import astropy.io.fits as fits
import pytest
class DUMMYEXT(FITSExtension):
def __init__(self, test_value):
data_list = [('TEST_VALUE', test_value)]
super(DUMMYEXT, self).__init__(tuple(data_list), (('EXTNAME', 'TEST', 'Extension name'),) )
class DUMMYFITS(FITSFile):
def __init__(self, test_value):
dummy_extension = DUMMYEXT(test_value)
super(DUMMYFITS, self).__init__(fits_extensions=[dummy_extension])
def test_fits_file():
dtypes = [np.int16,np.int32,np.int64,np.uint16,np.uint32,np.float32,np.float64]
dtype_keys = ['I','J','K','I','J','E','D']
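# Expected FITS binary-table TFORM codes: I=16-bit int, J=32-bit int, K=64-bit int, E=single-precision float, D=double-precision float; unsigned ints are written with the matching signed code.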
for i, dt in enumerate(dtypes):
test_values = np.ones(10,dtype=dt)
dummy_fits = DUMMYFITS(test_value=test_values)
assert len(dummy_fits._hdu_list) == 2
assert dummy_fits.index_of('TEST') == 1
assert dummy_fits['TEST'].header['TFORM1'] == dtype_keys[i]
assert np.alltrue(dummy_fits['TEST'].data['TEST_VALUE'] == test_values)
file_name = 'test_fits%d.fits'%i
dummy_fits.writeto(file_name,overwrite=True)
with pytest.raises(IOError):
dummy_fits.writeto(file_name, overwrite=False)
read_dummy_fits = fits.open(file_name)
assert len(read_dummy_fits) == 2
assert read_dummy_fits.index_of('TEST') == 1
assert read_dummy_fits['TEST'].header['TFORM1'] == dtype_keys[i]
assert np.alltrue(read_dummy_fits['TEST'].data['TEST_VALUE'] == test_values)
|
from threeML.io.fits_file import FITSExtension, FITSFile
import numpy as np
import astropy.io.fits as fits
import pytest
class DUMMYEXT(FITSExtension):
def __init__(self, test_value):
data_list = [('TEST_VALUE', test_value)]
super(DUMMYEXT, self).__init__(tuple(data_list), (('EXTNAME', 'TEST', 'Extension name'),) )
class DUMMYFITS(FITSFile):
def __init__(self, test_value):
dummy_extension = DUMMYEXT(test_value)
super(DUMMYFITS, self).__init__(fits_extensions=[dummy_extension])
def test_fits_file():
dtypes = [np.int16,np.int32,np.int64,np.uint16,np.uint32,np.float32,np.float64]
dtype_keys = ['I','J','K','I','J','E','D']
for i, dt in enumerate(dtypes):
test_values = np.ones(10,dtype=dt)
dummy_fits = DUMMYFITS(test_value=test_values)
assert len(dummy_fits._hdu_list) == 2
assert dummy_fits.index_of('TEST') == 1
assert dummy_fits['TEST'].header['TFORM1'] == dtype_keys[i]
assert np.alltrue(dummy_fits['TEST'].data['TEST_VALUE'] == test_values)
file_name = 'test_fits%d.fits'%i
dummy_fits.writeto(file_name,overwrite=True)
with pytest.raises(IOError):
dummy_fits.writeto(file_name, overwrite=False)
read_dummy_fits = fits.open(file_name)
assert len(read_dummy_fits) == 2
assert read_dummy_fits.index_of('TEST') == 1
assert read_dummy_fits['TEST'].header['TFORM1'] == dtype_keys[i]
assert np.alltrue(read_dummy_fits['TEST'].data['TEST_VALUE'] == test_values)
| none | 1 | 2.067719 | 2 |
|
src/repeated_string_match_686.py | xiezhq-hermann/LeetCode-in-Python | 3 | 6633121 | class Solution(object):
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
base = (len(B)-1)//len(A) + 1
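# base = ceil(len(B)/len(A)); since B may start mid-copy it never needs more than one extra repetition, so trying base and base+1 suffices.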
for i in range(base, base+2):
if B in A*i:
return i
else:
return -1
| class Solution(object):
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
base = (len(B)-1)//len(A) + 1
for i in range(base, base+2):
if B in A*i:
return i
else:
return -1
| en | 0.434094 | :type A: str :type B: str :rtype: int | 3.476163 | 3 |
cime/synthesis.py | iro-upgto/cime | 0 | 6633122 | from sympy import *
# ~ from sympy.matrices import *
from sympy.geometry import *
import numpy as np
import matplotlib.pyplot as plt
def two_positions(s1=None, s2=None):  # stub: default args keep the __main__ smoke call below runnable
pass
if __name__=="__main__":
# ~ s1 = Segment
# ~ s2 = (1,0)
two_positions()
| from sympy import *
# ~ from sympy.matrices import *
from sympy.geometry import *
import numpy as np
import matplotlib.pyplot as plt
def two_positions(s1=None, s2=None):  # stub: default args keep the __main__ smoke call below runnable
pass
if __name__=="__main__":
# ~ s1 = Segment
# ~ s2 = (1,0)
two_positions()
| en | 0.679928 | # ~ from sympy.matrices import * # ~ s1 = Segment # ~ s2 = (1,0) | 2.403321 | 2 |
tests/test_user.py | eshta/authentek | 0 | 6633123 | <filename>tests/test_user.py
from flask import url_for
from authentek.database.models import User
def test_get_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == user.username
assert data["email"] == user.email
assert data["active"] == user.active
def test_put_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.put(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
data = {"username": "updated"}
user_url = url_for('api.user_by_id', user_id=user.id)
# test update user
rep = client.put(user_url, json=data, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == "updated"
assert data["email"] == user.email
assert data["active"] == user.active
def test_delete_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 200
assert db.session.query(User).filter_by(id=user.id).first() is None
def test_create_user(client, db, admin_headers):
# test bad data
users_url = url_for('api.users')
data = {"username": "created"}
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 400
data["password"] = "<PASSWORD>"
data["email"] = "<EMAIL>"
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 201
data = rep.get_json()
user = db.session.query(User).filter_by(id=data["user"]["id"]).first()
assert user.username == "created"
assert user.email == "<EMAIL>"
def test_get_all_user(client, db, user_factory, admin_headers):
users_url = url_for('api.users')
users = user_factory.create_batch(30)
db.session.add_all(users)
db.session.commit()
rep = client.get(users_url, headers=admin_headers)
assert rep.status_code == 200
results = rep.get_json()
for user in users:
assert any(u["id"] == user.id for u in results["results"])
| <filename>tests/test_user.py
from flask import url_for
from authentek.database.models import User
def test_get_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.get(user_url, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == user.username
assert data["email"] == user.email
assert data["active"] == user.active
def test_put_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.put(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
data = {"username": "updated"}
user_url = url_for('api.user_by_id', user_id=user.id)
# test update user
rep = client.put(user_url, json=data, headers=admin_headers)
assert rep.status_code == 200
data = rep.get_json()["user"]
assert data["username"] == "updated"
assert data["email"] == user.email
assert data["active"] == user.active
def test_delete_user(client, db, user, admin_headers):
# test 404
user_url = url_for('api.user_by_id', user_id="100000")
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 404
db.session.add(user)
db.session.commit()
# test get_user
user_url = url_for('api.user_by_id', user_id=user.id)
rep = client.delete(user_url, headers=admin_headers)
assert rep.status_code == 200
assert db.session.query(User).filter_by(id=user.id).first() is None
def test_create_user(client, db, admin_headers):
# test bad data
users_url = url_for('api.users')
data = {"username": "created"}
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 400
data["password"] = "<PASSWORD>"
data["email"] = "<EMAIL>"
rep = client.post(users_url, json=data, headers=admin_headers)
assert rep.status_code == 201
data = rep.get_json()
user = db.session.query(User).filter_by(id=data["user"]["id"]).first()
assert user.username == "created"
assert user.email == "<EMAIL>"
def test_get_all_user(client, db, user_factory, admin_headers):
users_url = url_for('api.users')
users = user_factory.create_batch(30)
db.session.add_all(users)
db.session.commit()
rep = client.get(users_url, headers=admin_headers)
assert rep.status_code == 200
results = rep.get_json()
for user in users:
assert any(u["id"] == user.id for u in results["results"])
| en | 0.115415 | # test 404 # test get_user # test 404 # test update user # test 404 # test get_user # test bad data | 2.573247 | 3 |
tests/test_unit/test_pals.py | Cray-HPE/craycli | 1 | 6633124 | # MIT License
#
# (C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
test_pals.py - Unit tests for the pals module
"""
import io
import json
import os
import resource
import signal
import tempfile
import click
import pytest
from cray import pals
from ..utils.utils import compare_dicts
def test_signals():
# pylint: disable=comparison-with-callable
""" Test signal handling setup """
pals.signal_handler(signal.SIGTERM, None)
assert pals.SIGNAL_RECEIVED == signal.SIGTERM
assert pals.setup_signals() >= 0
for signum in [
signal.SIGHUP,
signal.SIGINT,
signal.SIGQUIT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
]:
assert signal.getsignal(signum) == pals.signal_handler
def test_make_ws_url():
""" Test making a websocket URL from an API gateway URL """
wsurl = pals.make_ws_url("test", "https://api-gw-service-nmn.local:30443")
assert wsurl == "wss://api-gw-service-nmn.local:30443/test"
wsurl = pals.make_ws_url("test", "api-gw-service-nmn.local:30443")
assert wsurl == "wss://api-gw-service-nmn.local:30443/test"
def test_get_rpc():
""" Test JSON-RPC request creation """
rpc = json.loads(pals.get_rpc("test"))
expected = {"jsonrpc": "2.0", "method": "test"}
compare_dicts(expected, rpc)
rpc = json.loads(pals.get_rpc("test", foo="bar"))
expected = {"jsonrpc": "2.0", "method": "test", "params": {"foo": "bar"}}
compare_dicts(expected, rpc)
rpc = json.loads(pals.get_rpc("test", "1234"))
expected = {"jsonrpc": "2.0", "method": "test", "id": "1234"}
compare_dicts(expected, rpc)
class MockSocket(object):
""" Mock socket class that receives canned content """
def __init__(self, recv_queue=None):
self.recv_queue = recv_queue
self.send_queue = []
def recv(self):
""" Receive a message from the socket """
if self.recv_queue:
return self.recv_queue.pop(0)
return b""
def send(self, msg):
""" Send a message to the socket """
self.send_queue.append(msg)
def test_send_rpc():
""" Test RPC sending """
sock = MockSocket()
pals.send_rpc(sock, "start")
pals.send_rpc(sock, "stream", "myrpc", foo="bar")
start = {"jsonrpc": "2.0", "method": "start"}
stream = {
"jsonrpc": "2.0",
"method": "stream",
"params": {"foo": "bar"},
"id": "myrpc",
}
compare_dicts(start, json.loads(sock.send_queue[0]))
compare_dicts(stream, json.loads(sock.send_queue[1]))
def test_get_exit_code():
""" Test exit status to exit code translation """
# Test out extremes for exit status
assert pals.get_exit_code(0x0000) == 0
assert pals.get_exit_code(0xFF00) == 255
# Test out extremes for termination signals
assert pals.get_exit_code(0x0001) == 129
assert pals.get_exit_code(0x007F) == 255
# Core dump shouldn't affect exit code
assert pals.get_exit_code(0x0081) == 129
assert pals.get_exit_code(0x00FF) == 255
def test_log_rank_exit():
""" Test logging rank exits (mainly for coverage) """
# Log shepherd exits
pals.log_rank_exit(-1, "nid000001", 0x0000)
pals.log_rank_exit(-1, "nid000001", 0xFF00)
pals.log_rank_exit(-1, "nid000001", 0x0001)
pals.log_rank_exit(-1, "nid000001", 0x007F)
pals.log_rank_exit(-1, "nid000001", 0x0081)
pals.log_rank_exit(-1, "nid000001", 0x00FF)
# Log rank exits
pals.log_rank_exit(0, "nid000001", 0x0000)
pals.log_rank_exit(0, "nid000001", 0xFF00)
pals.log_rank_exit(0, "nid000001", 0x0001)
pals.log_rank_exit(0, "nid000001", 0x007F)
pals.log_rank_exit(0, "nid000001", 0x0081)
pals.log_rank_exit(0, "nid000001", 0x00FF)
def test_handle_rpc(): # pylint: disable=too-many-locals
""" Test handling PALS RPCs """
sock = MockSocket()
app = pals.PALSApp()
stream_response = {"jsonrpc": "2.0", "result": None, "id": app.stream_rpcid}
start_rpc = {"jsonrpc": "2.0", "method": "start", "id": app.start_rpcid}
app.handle_rpc(sock, stream_response)
compare_dicts(start_rpc, json.loads(sock.send_queue[0]))
# Make temporary file for procinfo
tmpfd, tmpfname = tempfile.mkstemp()
os.close(tmpfd)
start_response = {"jsonrpc": "2.0", "result": None, "id": app.start_rpcid}
procinfo_rpc = {"jsonrpc": "2.0", "method": "procinfo", "id": app.procinfo_rpcid}
app.handle_rpc(sock, start_response, procinfo_file=tmpfname)
compare_dicts(json.loads(sock.send_queue[1]), procinfo_rpc)
procinfo = {
"apid": "5a2ecfa0-c99b-47f4-ae07-636da6dcc07e",
"pids": [123, 234, 345, 456],
"placement": [0, 0, 1, 2],
"cmdidxs": [0, 0, 0, 1],
"nodes": ["nid000001", "nid000002", "nid000003"],
"executables": ["/home/users/seymour/a.out", "/home/users/seymour/b.out"],
}
procinfo_response = {"jsonrpc": "2.0", "result": procinfo, "id": app.procinfo_rpcid}
app.handle_rpc(sock, procinfo_response, procinfo_file=tmpfname)
with open(tmpfname, encoding='utf-8') as tmpfp:
result = json.load(tmpfp)
os.unlink(tmpfname)
compare_dicts(procinfo, result)
stdout_rpc = {
"jsonrpc": "2.0",
"method": "stdout",
"params": {"content": "test", "encoding": "UTF-8"},
}
app.handle_rpc(sock, stdout_rpc)
stderr_rpc = {
"jsonrpc": "2.0",
"method": "stderr",
"params": {"content": "test", "encoding": "UTF-8"},
}
app.handle_rpc(sock, stderr_rpc)
exit_rpc = {
"jsonrpc": "2.0",
"method": "exit",
"params": {"rankid": 0, "host": "nid000001", "status": 1},
}
app.handle_rpc(sock, exit_rpc)
complete_rpc = {"jsonrpc": "2.0", "method": "complete"}
app.handle_rpc(sock, complete_rpc)
assert app.complete
error_rpc = {
"jsonrpc": "2.0",
"error": {"code": -32601, "message": "RPC method foo not found"},
}
with pytest.raises(click.ClickException):
app.handle_rpc(sock, error_rpc)
def test_forward_stdin():
""" Test forwarding stdin to application """
sock = MockSocket()
# Send some UTF-8 content
tmpfd, tmpfname = tempfile.mkstemp()
os.write(tmpfd, b"test")
os.lseek(tmpfd, 0, os.SEEK_SET)
with os.fdopen(tmpfd) as tmpf:
pals.forward_stdin(sock, tmpf)
os.unlink(tmpfname)
expected = {
"jsonrpc": "2.0",
"method": "stdin",
"params": {"content": "test", "encoding": "UTF-8"},
}
compare_dicts(expected, json.loads(sock.send_queue[0]))
expected = {"jsonrpc": "2.0", "method": "stdin", "params": {"eof": True}}
compare_dicts(expected, json.loads(sock.send_queue[1]))
# Send some non-UTF-8 content
sock.send_queue = []
tmpfd, tmpfname = tempfile.mkstemp()
os.write(tmpfd, b"\xc0")
os.lseek(tmpfd, 0, os.SEEK_SET)
with os.fdopen(tmpfd) as tmpf:
pals.forward_stdin(sock, tmpf)
os.unlink(tmpfname)
expected = {
"jsonrpc": "2.0",
"method": "stdin",
"params": {"content": "wA==", "encoding": "base64"},
}
compare_dicts(expected, json.loads(sock.send_queue[0]))
expected = {"jsonrpc": "2.0", "method": "stdin", "params": {"eof": True}}
compare_dicts(expected, json.loads(sock.send_queue[1]))
def test_find_executable():
""" Test searching for executable files """
oldpath = os.environ.get("PATH")
# If path contains a slash it's returned verbatim
assert pals.find_executable("foo/bar") == "foo/bar"
# If no PATH is set it's returned verbatim
if "PATH" in os.environ:
del os.environ["PATH"]
assert pals.find_executable("foobar") == "foobar"
# Test with something we should find
if os.path.exists("/usr/bin/true") and os.access("/usr/bin/true", os.X_OK):
os.environ["PATH"] = "/foobar:/usr/bin:/bin"
assert pals.find_executable("true") == "/usr/bin/true"
# Test with something we shouldn't find
os.environ["PATH"] = "."
assert pals.find_executable("foobar") is None
# Reset PATH
os.environ["PATH"] = oldpath
def test_get_executables():
""" Test getting the set of binaries to transfer """
req = {
"cmds": [
{"argv": ["/bin/pwd"]},
{"argv": ["/bin/pwd"]},
{"argv": ["/bin/echo", "foo"]},
]
}
# transfer=false doesn't modify argv
assert pals.get_executables(req, False) == set(["/bin/pwd", "/bin/echo"])
assert req["cmds"][0]["argv"] == ["/bin/pwd"]
assert req["cmds"][1]["argv"] == ["/bin/pwd"]
assert req["cmds"][2]["argv"] == ["/bin/echo", "foo"]
# transfer=true modifies argv
assert pals.get_executables(req, True) == set(["/bin/pwd", "/bin/echo"])
assert req["cmds"][0]["argv"] == ["pwd"]
assert req["cmds"][1]["argv"] == ["pwd"]
assert req["cmds"][2]["argv"] == ["echo", "foo"]
def test_split_mpmd_args():
""" Test splitting MPMD arguments """
assert pals.split_mpmd_args(["hostname"]) == [["hostname"]]
assert pals.split_mpmd_args(["hostname", ":", "echo"]) == [["hostname"], ["echo"]]
assert pals.split_mpmd_args(["hostname", "-v", ":", "-n4", "echo", "foo"]) == [
["hostname", "-v"],
["-n4", "echo", "foo"],
]
def test_get_resource_limits():
""" Test fetching resource limits """
# pylint: disable=use-implicit-booleaness-not-comparison
assert pals.get_resource_limits([]) == {}
# pylint: disable=use-implicit-booleaness-not-comparison
assert pals.get_resource_limits(["foo"]) == {}
soft, hard = resource.getrlimit(
resource.RLIMIT_CORE
) # pylint: disable=c-extension-no-member
assert pals.get_resource_limits(["CORE"]) == {"CORE": "%d %d" % (soft, hard)}
def test_parse_hostfile():
""" Test host file parsing """
hostfile = io.StringIO("\n# comment line\nhost1\n host2 \n")
assert pals.parse_hostfile(hostfile) == ["host1", "host2"]
hostfile.close()
with pytest.raises(ValueError):
pals.parse_hostfile(hostfile)
hostfile = io.StringIO("\n# comment line\nhost1\n host1 \n")
assert pals.parse_hostfile(hostfile) == ["host1", "host1"]
hostfile.close()
| # MIT License
#
# (C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
test_pals.py - Unit tests for the pals module
"""
import io
import json
import os
import resource
import signal
import tempfile
import click
import pytest
from cray import pals
from ..utils.utils import compare_dicts
def test_signals():
# pylint: disable=comparison-with-callable
""" Test signal handling setup """
pals.signal_handler(signal.SIGTERM, None)
assert pals.SIGNAL_RECEIVED == signal.SIGTERM
assert pals.setup_signals() >= 0
for signum in [
signal.SIGHUP,
signal.SIGINT,
signal.SIGQUIT,
signal.SIGABRT,
signal.SIGALRM,
signal.SIGTERM,
signal.SIGUSR1,
signal.SIGUSR2,
]:
assert signal.getsignal(signum) == pals.signal_handler
def test_make_ws_url():
""" Test making a websocket URL from an API gateway URL """
wsurl = pals.make_ws_url("test", "https://api-gw-service-nmn.local:30443")
assert wsurl == "wss://api-gw-service-nmn.local:30443/test"
wsurl = pals.make_ws_url("test", "api-gw-service-nmn.local:30443")
assert wsurl == "wss://api-gw-service-nmn.local:30443/test"
def test_get_rpc():
""" Test JSON-RPC request creation """
rpc = json.loads(pals.get_rpc("test"))
expected = {"jsonrpc": "2.0", "method": "test"}
compare_dicts(expected, rpc)
rpc = json.loads(pals.get_rpc("test", foo="bar"))
expected = {"jsonrpc": "2.0", "method": "test", "params": {"foo": "bar"}}
compare_dicts(expected, rpc)
rpc = json.loads(pals.get_rpc("test", "1234"))
expected = {"jsonrpc": "2.0", "method": "test", "id": "1234"}
compare_dicts(expected, rpc)
class MockSocket(object):
""" Mock socket class that receives canned content """
def __init__(self, recv_queue=None):
self.recv_queue = recv_queue
self.send_queue = []
def recv(self):
""" Receive a message from the socket """
if self.recv_queue:
return self.recv_queue.pop(0)
return b""
def send(self, msg):
""" Send a message to the socket """
self.send_queue.append(msg)
def test_send_rpc():
""" Test RPC sending """
sock = MockSocket()
pals.send_rpc(sock, "start")
pals.send_rpc(sock, "stream", "myrpc", foo="bar")
start = {"jsonrpc": "2.0", "method": "start"}
stream = {
"jsonrpc": "2.0",
"method": "stream",
"params": {"foo": "bar"},
"id": "myrpc",
}
compare_dicts(start, json.loads(sock.send_queue[0]))
compare_dicts(stream, json.loads(sock.send_queue[1]))
def test_get_exit_code():
""" Test exit status to exit code translation """
# Test out extremes for exit status
assert pals.get_exit_code(0x0000) == 0
assert pals.get_exit_code(0xFF00) == 255
# Test out extremes for termination signals
assert pals.get_exit_code(0x0001) == 129
assert pals.get_exit_code(0x007F) == 255
# Core dump shouldn't affect exit code
assert pals.get_exit_code(0x0081) == 129
assert pals.get_exit_code(0x00FF) == 255
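# The assertions above follow the usual POSIX wait-status layout: the high byte
# carries the exit status, the low seven bits carry a terminating signal (mapped
# to 128 + signum), and the 0x80 core-dump flag is ignored. A hedged reference
# version of that mapping (illustrative, not the pals source):
#
#     def expected_exit_code(status):
#         signum = status & 0x7F
#         if signum:
#             return 128 + signum
#         return (status >> 8) & 0xFF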
def test_log_rank_exit():
""" Test logging rank exits (mainly for coverage) """
# Log shepherd exits
pals.log_rank_exit(-1, "nid000001", 0x0000)
pals.log_rank_exit(-1, "nid000001", 0xFF00)
pals.log_rank_exit(-1, "nid000001", 0x0001)
pals.log_rank_exit(-1, "nid000001", 0x007F)
pals.log_rank_exit(-1, "nid000001", 0x0081)
pals.log_rank_exit(-1, "nid000001", 0x00FF)
# Log rank exits
pals.log_rank_exit(0, "nid000001", 0x0000)
pals.log_rank_exit(0, "nid000001", 0xFF00)
pals.log_rank_exit(0, "nid000001", 0x0001)
pals.log_rank_exit(0, "nid000001", 0x007F)
pals.log_rank_exit(0, "nid000001", 0x0081)
pals.log_rank_exit(0, "nid000001", 0x00FF)
def test_handle_rpc(): # pylint: disable=too-many-locals
""" Test handling PALS RPCs """
sock = MockSocket()
app = pals.PALSApp()
stream_response = {"jsonrpc": "2.0", "result": None, "id": app.stream_rpcid}
start_rpc = {"jsonrpc": "2.0", "method": "start", "id": app.start_rpcid}
app.handle_rpc(sock, stream_response)
compare_dicts(start_rpc, json.loads(sock.send_queue[0]))
# Make temporary file for procinfo
tmpfd, tmpfname = tempfile.mkstemp()
os.close(tmpfd)
start_response = {"jsonrpc": "2.0", "result": None, "id": app.start_rpcid}
procinfo_rpc = {"jsonrpc": "2.0", "method": "procinfo", "id": app.procinfo_rpcid}
app.handle_rpc(sock, start_response, procinfo_file=tmpfname)
compare_dicts(json.loads(sock.send_queue[1]), procinfo_rpc)
procinfo = {
"apid": "5a2ecfa0-c99b-47f4-ae07-636da6dcc07e",
"pids": [123, 234, 345, 456],
"placement": [0, 0, 1, 2],
"cmdidxs": [0, 0, 0, 1],
"nodes": ["nid000001", "nid000002", "nid000003"],
"executables": ["/home/users/seymour/a.out", "/home/users/seymour/b.out"],
}
procinfo_response = {"jsonrpc": "2.0", "result": procinfo, "id": app.procinfo_rpcid}
app.handle_rpc(sock, procinfo_response, procinfo_file=tmpfname)
with open(tmpfname, encoding='utf-8') as tmpfp:
result = json.load(tmpfp)
os.unlink(tmpfname)
compare_dicts(procinfo, result)
stdout_rpc = {
"jsonrpc": "2.0",
"method": "stdout",
"params": {"content": "test", "encoding": "UTF-8"},
}
app.handle_rpc(sock, stdout_rpc)
stderr_rpc = {
"jsonrpc": "2.0",
"method": "stderr",
"params": {"content": "test", "encoding": "UTF-8"},
}
app.handle_rpc(sock, stderr_rpc)
exit_rpc = {
"jsonrpc": "2.0",
"method": "exit",
"params": {"rankid": 0, "host": "nid000001", "status": 1},
}
app.handle_rpc(sock, exit_rpc)
complete_rpc = {"jsonrpc": "2.0", "method": "complete"}
app.handle_rpc(sock, complete_rpc)
assert app.complete
error_rpc = {
"jsonrpc": "2.0",
"error": {"code": -32601, "message": "RPC method foo not found"},
}
with pytest.raises(click.ClickException):
app.handle_rpc(sock, error_rpc)
def test_forward_stdin():
""" Test forwarding stdin to application """
sock = MockSocket()
# Send some UTF-8 content
tmpfd, tmpfname = tempfile.mkstemp()
os.write(tmpfd, b"test")
os.lseek(tmpfd, 0, os.SEEK_SET)
with os.fdopen(tmpfd) as tmpf:
pals.forward_stdin(sock, tmpf)
os.unlink(tmpfname)
expected = {
"jsonrpc": "2.0",
"method": "stdin",
"params": {"content": "test", "encoding": "UTF-8"},
}
compare_dicts(expected, json.loads(sock.send_queue[0]))
expected = {"jsonrpc": "2.0", "method": "stdin", "params": {"eof": True}}
compare_dicts(expected, json.loads(sock.send_queue[1]))
# Send some non-UTF-8 content
sock.send_queue = []
tmpfd, tmpfname = tempfile.mkstemp()
os.write(tmpfd, b"\xc0")
os.lseek(tmpfd, 0, os.SEEK_SET)
with os.fdopen(tmpfd) as tmpf:
pals.forward_stdin(sock, tmpf)
os.unlink(tmpfname)
expected = {
"jsonrpc": "2.0",
"method": "stdin",
"params": {"content": "wA==", "encoding": "base64"},
}
compare_dicts(expected, json.loads(sock.send_queue[0]))
expected = {"jsonrpc": "2.0", "method": "stdin", "params": {"eof": True}}
compare_dicts(expected, json.loads(sock.send_queue[1]))
def test_find_executable():
""" Test searching for executable files """
oldpath = os.environ.get("PATH")
# If path contains a slash it's returned verbatim
assert pals.find_executable("foo/bar") == "foo/bar"
# If no PATH is set it's returned verbatim
if "PATH" in os.environ:
del os.environ["PATH"]
assert pals.find_executable("foobar") == "foobar"
# Test with something we should find
if os.path.exists("/usr/bin/true") and os.access("/usr/bin/true", os.X_OK):
os.environ["PATH"] = "/foobar:/usr/bin:/bin"
assert pals.find_executable("true") == "/usr/bin/true"
# Test with something we shouldn't find
os.environ["PATH"] = "."
assert pals.find_executable("foobar") is None
# Reset PATH
os.environ["PATH"] = oldpath
def test_get_executables():
""" Test getting the set of binaries to transfer """
req = {
"cmds": [
{"argv": ["/bin/pwd"]},
{"argv": ["/bin/pwd"]},
{"argv": ["/bin/echo", "foo"]},
]
}
# transfer=false doesn't modify argv
assert pals.get_executables(req, False) == set(["/bin/pwd", "/bin/echo"])
assert req["cmds"][0]["argv"] == ["/bin/pwd"]
assert req["cmds"][1]["argv"] == ["/bin/pwd"]
assert req["cmds"][2]["argv"] == ["/bin/echo", "foo"]
# transfer=true modifies argv
assert pals.get_executables(req, True) == set(["/bin/pwd", "/bin/echo"])
assert req["cmds"][0]["argv"] == ["pwd"]
assert req["cmds"][1]["argv"] == ["pwd"]
assert req["cmds"][2]["argv"] == ["echo", "foo"]
def test_split_mpmd_args():
""" Test splitting MPMD arguments """
assert pals.split_mpmd_args(["hostname"]) == [["hostname"]]
assert pals.split_mpmd_args(["hostname", ":", "echo"]) == [["hostname"], ["echo"]]
assert pals.split_mpmd_args(["hostname", "-v", ":", "-n4", "echo", "foo"]) == [
["hostname", "-v"],
["-n4", "echo", "foo"],
]
def test_get_resource_limits():
""" Test fetching resource limits """
# pylint: disable=use-implicit-booleaness-not-comparison
assert pals.get_resource_limits([]) == {}
# pylint: disable=use-implicit-booleaness-not-comparison
assert pals.get_resource_limits(["foo"]) == {}
soft, hard = resource.getrlimit(
resource.RLIMIT_CORE
) # pylint: disable=c-extension-no-member
assert pals.get_resource_limits(["CORE"]) == {"CORE": "%d %d" % (soft, hard)}
def test_parse_hostfile():
""" Test host file parsing """
hostfile = io.StringIO("\n# comment line\nhost1\n host2 \n")
assert pals.parse_hostfile(hostfile) == ["host1", "host2"]
hostfile.close()
with pytest.raises(ValueError):
pals.parse_hostfile(hostfile)
hostfile = io.StringIO("\n# comment line\nhost1\n host1 \n")
assert pals.parse_hostfile(hostfile) == ["host1", "host1"]
hostfile.close()
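# The contract exercised above (strip whitespace, skip blank lines and "#" comments,
# keep duplicates and ordering; reading a closed file raises ValueError on its own)
# could be reproduced by a sketch like this; it illustrates the expected behaviour,
# not the actual pals code:
#
#     def parse_hostfile_reference(hostfile):
#         hosts = []
#         for line in hostfile:
#             line = line.strip()
#             if line and not line.startswith("#"):
#                 hosts.append(line)
#         return hosts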
| en | 0.764823 | # MIT License # # (C) Copyright [2020-2022] Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. test_pals.py - Unit tests for the pals module # pylint: disable=comparison-with-callable Test signal handling setup Test making a websocket URL from an API gateway URL Test JSON-RPC request creation Mock socket class that receives canned content Receive a message from the socket Send a message to the socket Test RPC sending Test exit status to exit code translation # Test out extremes for exit status # Test out extremes for termination signals # Core dump shouldn't affect exit code Test logging rank exits (mainly for coverage) # Log shepherd exits # Log rank exits # pylint: disable=too-many-locals Test handling PALS RPCs # Make temporary file for procinfo Test forwarding stdin to application # Send some UTF-8 content # Send some non-UTF-8 content Test searching for executable files # If path contains a slash it's returned verbatim # If no PATH is set it's returned verbatim # Test with something we should find # Test with something we shouldn't find # Reset PATH Test getting the set of binaries to transfer # transfer=false doesn't modify argv # transfer=true modifies argv Test splitting MPMD arguments Test fetching resource limits # pylint: disable=use-implicit-booleaness-not-comparison # pylint: disable=use-implicit-booleaness-not-comparison # pylint: disable=c-extension-no-member Test host file parsing # comment line\nhost1\n host2 \n") # comment line\nhost1\n host1 \n") | 1.506318 | 2 |
boids/Flock.py | stiebels/boids | 0 | 6633125 | import random
import numpy as np
from .Boid import Boid
'''
Implements Flock, which creates and coordinates the collection of Boid instances
'''
class Flock(object):
# Flock creates the Boid objects and drives their per-step updates
def __init__(self, size, fly_middle_strength, fly_away_limit, speed_match_strength, distance_limit,
x_coord_range, y_coord_range,
x_velo_range, y_velo_range):
self.size = size
self.x_coord_range = x_coord_range
self.y_coord_range = y_coord_range
self.x_velo_range = x_velo_range
self.y_velo_range = y_velo_range
self.boids = self.create_boids()
self.fly_middle_strength = fly_middle_strength
self.fly_away_limit = fly_away_limit
self.speed_match_strength = speed_match_strength
self.distance_limit = distance_limit
def create_boids(self):
# Creates Boid objects
boids = np.asarray([Boid(
random.uniform(self.x_coord_range[0], self.x_coord_range[1]),
random.uniform(self.y_coord_range[0], self.y_coord_range[1]),
random.uniform(self.x_velo_range[0], self.x_velo_range[1]),
random.uniform(self.y_velo_range[0], self.y_velo_range[1])
) for boid in range(0, self.size)])
return boids
def update_boids(self):
# Invokes computation of new position of each Boid
for boid in self.boids:
boid.fly_middle(self, fly_middle_strength=self.fly_middle_strength)
for boid in self.boids:
boid.fly_away(self, fly_away_limit=self.fly_away_limit)
for boid in self.boids:
boid.match_speed(self, speed_match_strength=self.speed_match_strength, distance_limit=self.distance_limit)
boid.move()
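# Example usage (hedged): the constructor arguments below are arbitrary
# illustrative values, not tuned simulation settings.
if __name__ == "__main__":
    demo_flock = Flock(size=20,
                       fly_middle_strength=0.01,
                       fly_away_limit=100,
                       speed_match_strength=0.125,
                       distance_limit=10000,
                       x_coord_range=(-450.0, 50.0),
                       y_coord_range=(300.0, 600.0),
                       x_velo_range=(0.0, 10.0),
                       y_velo_range=(-20.0, 20.0))
    for _ in range(10):  # advance the simulation a few steps
        demo_flock.update_boids()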
| import random
import numpy as np
from .Boid import Boid
'''
Implements Flock, which creates and coordinates the collection of Boid instances
'''
class Flock(object):
# Flock creates the Boid objects and drives their per-step updates
def __init__(self, size, fly_middle_strength, fly_away_limit, speed_match_strength, distance_limit,
x_coord_range, y_coord_range,
x_velo_range, y_velo_range):
self.size = size
self.x_coord_range = x_coord_range
self.y_coord_range = y_coord_range
self.x_velo_range = x_velo_range
self.y_velo_range = y_velo_range
self.boids = self.create_boids()
self.fly_middle_strength = fly_middle_strength
self.fly_away_limit = fly_away_limit
self.speed_match_strength = speed_match_strength
self.distance_limit = distance_limit
def create_boids(self):
# Creates Boid objects
boids = np.asarray([Boid(
random.uniform(self.x_coord_range[0], self.x_coord_range[1]),
random.uniform(self.y_coord_range[0], self.y_coord_range[1]),
random.uniform(self.x_velo_range[0], self.x_velo_range[1]),
random.uniform(self.y_velo_range[0], self.y_velo_range[1])
) for boid in range(0, self.size)])
return boids
def update_boids(self):
# Invokes computation of new position of each Boid
for boid in self.boids:
boid.fly_middle(self, fly_middle_strength=self.fly_middle_strength)
for boid in self.boids:
boid.fly_away(self, fly_away_limit=self.fly_away_limit)
for boid in self.boids:
boid.match_speed(self, speed_match_strength=self.speed_match_strength, distance_limit=self.distance_limit)
boid.move()
| en | 0.8357 | Implements Flock as controlling instance of each Boid # Flock is controlling instances of Boid # Creates Boid objects # Invokes computation of new position of each Boid | 2.951404 | 3 |
users/models/__init__.py | recentfahim/smartbusinessbd | 0 | 6633126 |
from .contact import Contact
from .user import User
from .payments import Payment
from .subscription import Subscription
from .temp_data import TempData
| from .contact import Contact
from .user import User
from .payments import Payment
from .subscription import Subscription
from .temp_data import TempData | none | 1 | 1.076764 | 1 |
|
settings.py | iag-geo/concord | 0 | 6633127 | # parses the command line arguments and exposes them as module-level settings
import os
import argparse
import platform
import psycopg2
import sys
from datetime import datetime
# get latest Geoscape release version as YYYYMM, as of the date provided, as well as the prev. version 3 months prior
def get_geoscape_version(date):
month = date.month
year = date.year
if month == 1:
gs_version = str(year - 1) + "11"
previous_gs_version = str(year - 1) + "08"
elif 2 <= month < 5:
gs_version = str(year) + "02"
previous_gs_version = str(year - 1) + "11"
elif 5 <= month < 8:
gs_version = str(year) + "05"
previous_gs_version = str(year) + "02"
elif 8 <= month < 11:
gs_version = str(year) + "08"
previous_gs_version = str(year) + "05"
else:
gs_version = str(year) + "11"
previous_gs_version = str(year) + "08"
return gs_version, previous_gs_version
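# Worked examples: a date in June 2021 falls in the 5 <= month < 8 band, so this
# returns ("202105", "202102"); a date in January 2022 rolls back to the previous
# year's releases and returns ("202111", "202108").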
# get python, psycopg2 and OS versions
python_version = sys.version.split("(")[0].strip()
psycopg2_version = psycopg2.__version__.split("(")[0].strip()
os_version = platform.system() + " " + platform.version().strip()
# get the command line arguments for the script
parser = argparse.ArgumentParser(
description="A CSV file and supporting scripts for converting data between Australian boundaries.")
# PG Options
parser.add_argument(
"--pghost",
help="Host name for Postgres server. Defaults to PGHOST environment variable if set, otherwise localhost.")
parser.add_argument(
"--pgport", type=int,
help="Port number for Postgres server. Defaults to PGPORT environment variable if set, otherwise 5432.")
parser.add_argument(
"--pgdb",
help="Database name for Postgres server. Defaults to PGDATABASE environment variable if set, "
"otherwise geoscape.")
parser.add_argument(
"--pguser",
help="Username for Postgres server. Defaults to PGUSER environment variable if set, otherwise postgres.")
parser.add_argument(
"--pgpassword",
help="Password for Postgres server. Defaults to PGPASSWORD environment variable if set, "
"otherwise \"password\".")
# schema names for the raw gnaf, flattened reference and admin boundary tables
geoscape_version, previous_geoscape_version = get_geoscape_version(datetime.today())
parser.add_argument(
"--geoscape-version", default=geoscape_version,
help="Geoscape release version number as YYYYMM. Defaults to latest release year and month \""
+ geoscape_version + "\".")
# parser.add_argument(
# "--previous-geoscape-version", default=previous_geoscape_version,
# help="Previous Geoscape release version number as YYYYMM; used for QA comparison. "
# "Defaults to \"" + previous_geoscape_version + "\".")
parser.add_argument(
"--gnaf-schema",
help="Input schema name to store final GNAF tables in. Also the output schema for the concordance table."
"Defaults to \"gnaf_" + geoscape_version + "\".")
parser.add_argument(
"--admin-schema",
help="Input schema name to store final admin boundary tables in. Defaults to \"admin_bdys_"
+ geoscape_version + "\".")
# parser.add_argument(
# "--previous-gnaf-schema",
# help="Schema with previous version of GNAF tables in. Defaults to \"gnaf_" + previous_geoscape_version + "\".")
# parser.add_argument(
# "--previous-admin-schema",
# help="Schema with previous version of GNAF tables in. Defaults to \"admin_bdys_"
# + previous_geoscape_version + "\".")
# output file/table name & directory
parser.add_argument(
"--output-table",
help="Name of both output concordance table and file. Defaults to 'boundary_concordance'.")
parser.add_argument(
"--output-score_table",
help="Name of both output concordance QA table and file. Defaults to '<output_table>_score'.")
parser.add_argument(
"--output-path", required=True,
help="Local path where the boundary concordance files will be output.")
# global var containing all input parameters
args = parser.parse_args()
# assign parameters to global settings
gnaf_schema = args.gnaf_schema or "gnaf_" + geoscape_version
admin_bdys_schema = args.admin_schema or "admin_bdys_" + geoscape_version
# previous_gnaf_schema = args.previous_gnaf_schema or "gnaf_" + previous_geoscape_version
# previous_admin_bdys_schema = args.previous_admin_schema or "admin_bdys_" + previous_geoscape_version
output_path = args.output_path
output_table = args.output_table or "boundary_concordance"
output_score_table = args.output_score_table or f"{output_table}_score"
# create postgres connect string
pg_host = args.pghost or os.getenv("PGHOST", "localhost")
pg_port = args.pgport or os.getenv("PGPORT", 5432)
pg_db = args.pgdb or os.getenv("PGDATABASE", "geoscape")
pg_user = args.pguser or os.getenv("PGUSER", "postgres")
pg_password = args.pgpassword or os.getenv("PGPASSWORD", "password")
pg_connect_string = f"dbname='{pg_db}' host='{pg_host}' port='{pg_port}' user='{pg_user}' password='{pg_password}'"
# set postgres script directory
sql_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "postgres-scripts")
# ABS ASGS boundaries that align 100% - do not edit
asgs_concordance_list = ["sa1", "sa2", "sa3", "sa4", "gcc"]
# asgs_concordance_list = ["mb", "sa1", "sa2", "sa3", "sa4", "gcc", "state"]
# ---------------------------------------------------------------------------------------
# edit boundary list to find concordances with
# ---------------------------------------------------------------------------------------
# sources of address level data with boundary tags - names are hardcoded, don't edit them!
source_list = [
{"name": "abs 2016", "schema": gnaf_schema, "table": "address_principal_census_2016_boundaries"},
{"name": "abs 2021", "schema": gnaf_schema, "table": "address_principal_census_2021_boundaries"},
{"name": f"geoscape {geoscape_version}", "schema": gnaf_schema, "table": "address_principal_admin_boundaries"}
]
# source of residential addresses to filter on - this will either be based on ABS Census 2021 meshblocks
# or planning zone data from the Geoscape Buildings datasets (licensed dataset)
# residential_address_source = {"name": "geoscape", "schema": "geoscape_202203",
# "table": "address_principals_buildings"}
# residential_address_source = {"name": "abs 2016", "schema": gnaf_schema,
# "table": "address_principal_census_2016_boundaries"}
residential_address_source = {"name": "abs 2021", "schema": gnaf_schema,
"table": "address_principal_census_2021_boundaries"}
# the list of boundary pairs to create concordances - from and to sources must match the names of the above sources
# don't include ASGS ABS boundary pairs that are nested (e.g. SA2 > SA3);
# they have their own lookup table and are added automatically
boundary_list = [
# ABS 2016 to ABS 2016 bdys
{"from": "sa2", "from_source": "abs 2016", "to": "poa", "to_source": "abs 2016"},
{"from": "sa2", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
{"from": "poa", "from_source": "abs 2016", "to": "sa2", "to_source": "abs 2016"},
{"from": "poa", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
{"from": "sa3", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
# only 25% concordance with a ~14% error
# {"from": "lga", "from_source": "abs 2016", "to": "poa", "to_source": "abs 2016"},
{"from": "lga", "from_source": "abs 2016", "to": "sa3", "to_source": "abs 2016"},
# Geoscape to ABS 2016 bdys
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "sa2", "to_source": "abs 2016"},
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "sa3", "to_source": "abs 2016"},
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "sa3", "to_source": "abs 2016"},
# TODO: handle the "duplicate" postcodes that go over state borders
# {"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "poa", "to_source": "abs 2016"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
{"from": "lga", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
# Geoscape to Geoscape bdys
{"from": "locality", "from_source": f"geoscape {geoscape_version}",
"to": "lga", "to_source": f"geoscape {geoscape_version}"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}",
"to": "lga", "to_source": f"geoscape {geoscape_version}"}
# # test concordance for measuring reliability against known differences
# {"from": "sa2", "from_source": "abs 2016", "to": "sa2", "to_source": "abs 2021"}
# TODO: add ABS Census 2016 to 2021 correspondences using official ABS files (assuming there"s a demand)
]
# ---------------------------------------------------------------------------------------
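# Example consumption of these settings (hedged; kept as a comment so importing
# this module stays free of side effects):
#
#     import psycopg2
#     with psycopg2.connect(pg_connect_string) as conn, conn.cursor() as cur:
#         cur.execute("SELECT version()")
#         print(cur.fetchone()[0])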
| # parses the command line arguments and exposes them as module-level settings
import os
import argparse
import platform
import psycopg2
import sys
from datetime import datetime
# get latest Geoscape release version as YYYYMM, as of the date provided, as well as the prev. version 3 months prior
def get_geoscape_version(date):
month = date.month
year = date.year
if month == 1:
gs_version = str(year - 1) + "11"
previous_gs_version = str(year - 1) + "08"
elif 2 <= month < 5:
gs_version = str(year) + "02"
previous_gs_version = str(year - 1) + "11"
elif 5 <= month < 8:
gs_version = str(year) + "05"
previous_gs_version = str(year) + "02"
elif 8 <= month < 11:
gs_version = str(year) + "08"
previous_gs_version = str(year) + "05"
else:
gs_version = str(year) + "11"
previous_gs_version = str(year) + "08"
return gs_version, previous_gs_version
# get python, psycopg2 and OS versions
python_version = sys.version.split("(")[0].strip()
psycopg2_version = psycopg2.__version__.split("(")[0].strip()
os_version = platform.system() + " " + platform.version().strip()
# get the command line arguments for the script
parser = argparse.ArgumentParser(
description="A CSV file and supporting scripts for converting data between Australian boundaries.")
# PG Options
parser.add_argument(
"--pghost",
help="Host name for Postgres server. Defaults to PGHOST environment variable if set, otherwise localhost.")
parser.add_argument(
"--pgport", type=int,
help="Port number for Postgres server. Defaults to PGPORT environment variable if set, otherwise 5432.")
parser.add_argument(
"--pgdb",
help="Database name for Postgres server. Defaults to PGDATABASE environment variable if set, "
"otherwise geoscape.")
parser.add_argument(
"--pguser",
help="Username for Postgres server. Defaults to PGUSER environment variable if set, otherwise postgres.")
parser.add_argument(
"--pgpassword",
help="Password for Postgres server. Defaults to PGPASSWORD environment variable if set, "
"otherwise \"password\".")
# schema names for the raw gnaf, flattened reference and admin boundary tables
geoscape_version, previous_geoscape_version = get_geoscape_version(datetime.today())
parser.add_argument(
"--geoscape-version", default=geoscape_version,
help="Geoscape release version number as YYYYMM. Defaults to latest release year and month \""
+ geoscape_version + "\".")
# parser.add_argument(
# "--previous-geoscape-version", default=previous_geoscape_version,
# help="Previous Geoscape release version number as YYYYMM; used for QA comparison. "
# "Defaults to \"" + previous_geoscape_version + "\".")
parser.add_argument(
"--gnaf-schema",
help="Input schema name to store final GNAF tables in. Also the output schema for the concordance table."
"Defaults to \"gnaf_" + geoscape_version + "\".")
parser.add_argument(
"--admin-schema",
help="Input schema name to store final admin boundary tables in. Defaults to \"admin_bdys_"
+ geoscape_version + "\".")
# parser.add_argument(
# "--previous-gnaf-schema",
# help="Schema with previous version of GNAF tables in. Defaults to \"gnaf_" + previous_geoscape_version + "\".")
# parser.add_argument(
# "--previous-admin-schema",
# help="Schema with previous version of GNAF tables in. Defaults to \"admin_bdys_"
# + previous_geoscape_version + "\".")
# output file/table name & directory
parser.add_argument(
"--output-table",
help="Name of both output concordance table and file. Defaults to 'boundary_concordance'.")
parser.add_argument(
"--output-score_table",
help="Name of both output concordance QA table and file. Defaults to '<output_table>_score'.")
parser.add_argument(
"--output-path", required=True,
help="Local path where the boundary concordance files will be output.")
# global var containing all input parameters
args = parser.parse_args()
# assign parameters to global settings
gnaf_schema = args.gnaf_schema or "gnaf_" + geoscape_version
admin_bdys_schema = args.admin_schema or "admin_bdys_" + geoscape_version
# previous_gnaf_schema = args.previous_gnaf_schema or "gnaf_" + previous_geoscape_version
# previous_admin_bdys_schema = args.previous_admin_schema or "admin_bdys_" + previous_geoscape_version
output_path = args.output_path
output_table = args.output_table or "boundary_concordance"
output_score_table = args.output_score_table or f"{output_table}_score"
# create postgres connect string
pg_host = args.pghost or os.getenv("PGHOST", "localhost")
pg_port = args.pgport or os.getenv("PGPORT", 5432)
pg_db = args.pgdb or os.getenv("PGDATABASE", "geoscape")
pg_user = args.pguser or os.getenv("PGUSER", "postgres")
pg_password = args.pgpassword or os.getenv("PGPASSWORD", "password")
pg_connect_string = f"dbname='{pg_db}' host='{pg_host}' port='{pg_port}' user='{pg_user}' password='{pg_password}'"
# set postgres script directory
sql_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "postgres-scripts")
# ABS ASGS boundaries that align 100% - do not edit
asgs_concordance_list = ["sa1", "sa2", "sa3", "sa4", "gcc"]
# asgs_concordance_list = ["mb", "sa1", "sa2", "sa3", "sa4", "gcc", "state"]
# ---------------------------------------------------------------------------------------
# edit boundary list to find concordances with
# ---------------------------------------------------------------------------------------
# sources of address level data with boundary tags - names are hardcoded, don't edit them!
source_list = [
{"name": "abs 2016", "schema": gnaf_schema, "table": "address_principal_census_2016_boundaries"},
{"name": "abs 2021", "schema": gnaf_schema, "table": "address_principal_census_2021_boundaries"},
{"name": f"geoscape {geoscape_version}", "schema": gnaf_schema, "table": "address_principal_admin_boundaries"}
]
# source of residential addresses to filter on - this will either be based on ABS Census 2021 meshblocks
# or planning zone data from the Geoscape Buildings datasets (licensed dataset)
# residential_address_source = {"name": "geoscape", "schema": "geoscape_202203",
# "table": "address_principals_buildings"}
# residential_address_source = {"name": "abs 2016", "schema": gnaf_schema,
# "table": "address_principal_census_2016_boundaries"}
residential_address_source = {"name": "abs 2021", "schema": gnaf_schema,
"table": "address_principal_census_2021_boundaries"}
# the list of boundary pairs to create concordances - from and to sources must match the names of the above sources
# don't include ASGS ABS boundary pairs that are nested (e.g. SA2 > SA3);
# they have their own lookup table and are added automatically
boundary_list = [
# ABS 2016 to ABS 2016 bdys
{"from": "sa2", "from_source": "abs 2016", "to": "poa", "to_source": "abs 2016"},
{"from": "sa2", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
{"from": "poa", "from_source": "abs 2016", "to": "sa2", "to_source": "abs 2016"},
{"from": "poa", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
{"from": "sa3", "from_source": "abs 2016", "to": "lga", "to_source": "abs 2016"},
# only 25% concordance with a ~14% error
# {"from": "lga", "from_source": "abs 2016", "to": "poa", "to_source": "abs 2016"},
{"from": "lga", "from_source": "abs 2016", "to": "sa3", "to_source": "abs 2016"},
# Geoscape to ABS 2016 bdys
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "sa2", "to_source": "abs 2016"},
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "sa3", "to_source": "abs 2016"},
{"from": "locality", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "sa3", "to_source": "abs 2016"},
# TODO: handle the "duplicate" postcodes that go over state borders
# {"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "poa", "to_source": "abs 2016"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
{"from": "lga", "from_source": f"geoscape {geoscape_version}", "to": "lga", "to_source": "abs 2016"},
# Geoscape to Geoscape bdys
{"from": "locality", "from_source": f"geoscape {geoscape_version}",
"to": "lga", "to_source": f"geoscape {geoscape_version}"},
{"from": "postcode", "from_source": f"geoscape {geoscape_version}",
"to": "lga", "to_source": f"geoscape {geoscape_version}"}
# # test concordance for measuring reliability against known differences
# {"from": "sa2", "from_source": "abs 2016", "to": "sa2", "to_source": "abs 2021"}
# TODO: add ABS Census 2016 to 2021 correspondences using official ABS files (assuming there"s a demand)
]
# ---------------------------------------------------------------------------------------
| en | 0.61736 | # takes the command line parameters and creates a dictionary of setting_dict # get latest Geoscape release version as YYYYMM, as of the date provided, as well as the prev. version 3 months prior # get python, psycopg2 and OS versions # get the command line arguments for the script # PG Options # schema names for the raw gnaf, flattened reference and admin boundary tables # parser.add_argument( # "--previous-geoscape-version", default=previous_geoscape_version, # help="Previous Geoscape release version number as YYYYMM; used for QA comparison. " # "Defaults to \"" + previous_geoscape_version + "\".") # parser.add_argument( # "--previous-gnaf-schema", # help="Schema with previous version of GNAF tables in. Defaults to \"gnaf_" + previous_geoscape_version + "\".") # parser.add_argument( # "--previous-admin-schema", # help="Schema with previous version of GNAF tables in. Defaults to \"admin_bdys_" # + previous_geoscape_version + "\".") # output file/table name & directory # global var containing all input parameters # assign parameters to global settings # previous_gnaf_schema = args.previous_gnaf_schema or "gnaf_" + previous_geoscape_version # previous_admin_bdys_schema = args.previous_admin_schema or "admin_bdys_" + previous_geoscape_version # create postgres connect string # set postgres script directory # ABS ASGS boundaries that align 100% - do not edit # asgs_concordance_list = ["mb", "sa1", "sa2", "sa3", "sa4", "gcc", "state"] # --------------------------------------------------------------------------------------- # edit boundary list to find concordances with # --------------------------------------------------------------------------------------- # sources of address level data with boundary tags - names are hardcoded, don't edit them! # source of residential addresses to filter on - this will either be based on ABS Census 2021 meshblocks # or planning zone data from the Geoscape Buildings datasets (licensed dataset) # residential_address_source = {"name": "geoscape", "schema": "geoscape_202203", # "table": "address_principals_buildings"} # residential_address_source = {"name": "abs 2016", "schema": gnaf_schema, # "table": "address_principal_census_2016_boundaries"} # the list of boundary pairs to create concordances - from and to sources must match the names of the above sources # don't include ASGS ABS boundary pairs that are nested (e.g. SA2 > SA3); # they have their own lookup table and are added automatically # ABS 2016 to ABS 2016 bdys # only 25% concordance with a ~14% error # {"from": "lga", "from_source": "abs 2016", "to": "poa", "to_source": "abs 2016"}, # Geoscape to ABS 2016 bdys # TODO: handle the "duplicate" postcodes that go over state borders # {"from": "postcode", "from_source": f"geoscape {geoscape_version}", "to": "poa", "to_source": "abs 2016"}, # Geoscape to Geoscape bdys # # test concordance for measuring reliability against known differences # {"from": "sa2", "from_source": "abs 2016", "to": "sa2", "to_source": "abs 2021"} # TODO: add ABS Census 2016 to 2021 correspondences using official ABS files (assuming there"s a demand) # --------------------------------------------------------------------------------------- | 3.090209 | 3 |
transitions_kpi.py | regisb/repo-tools | 23 | 6633128 |
#!/usr/bin/env python
"""
Scrapes and parses information from JIRA's transition states.
Runs the JIRA spider, then parses the output states.json
file to obtain KPI information.
See https://openedx.atlassian.net/wiki/display/OPEN/Tracking+edX+Commitment+To+OSPRs
"""
from collections import OrderedDict
from functools import reduce
from subprocess import check_call
import argparse
import datetime
import dateutil.parser
import json
import numpy
import operator
import sys
EDX_ENGINEERING_STATES = [
'Needs Triage',
'Product Review',
'Community Manager Review',
'Awaiting Prioritization',
'Engineering Review',
]
def scrape_jira():
"""
Re-scrapes jira into states.json
"""
# Delete content of states.json before re-writing
with open("states.json", "w"):
pass
print("Running scrapy spider over JIRA...")
check_call("scrapy runspider jiraspider.py -o states.json".split(" "))
print("-" * 20)
def engineering_time_spent(state_dict):
"""
Given a ticket's state dictionary, returns how much engineering time was spent on it.
Engineering states determined by EDX_ENGINEERING_STATES list.
"""
# Measurement 1: Average Time Spent by edX Engineering
# This measurement will sum up the amount of time it takes the engineering team to process OSPR work.
# AverageTime = sum(amount of time a ticket spends in edX states) / count(all tickets)
# This will be a rolling average over all tickets currently open, or closed in the past X days.
# In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months'
# worth of data, we can assess what historical interval(s) gives us the most useful, actionable data.
# This is a measurement across all of engineering. We are not proposing to measure teams individually.
total_time = datetime.timedelta(0)
for state, tdelta in state_dict.items():
if state in EDX_ENGINEERING_STATES:
total_time += tdelta
return total_time
def single_state_time_spent(state_dict, state):
"""
Given a ticket's state dictionary, returns how much time it spent
in the given `state`.
Assumes state_dict has the key `state` present.
"""
# Measurement 2: Average Time Spent in Scrum Team Backlog
# For the PRs that need to be reviewed by a scrum team, obtain an average of how long a ticket spends in a team backlog.
# AverageBacklog = sum(amount of time a ticket spends in "Awaiting Prioritization") /
# count(tickets with a non-zero amount of time spent in "Awaiting Prioritization")
# This will be a rolling average over all tickets currently open, or closed in the past X days.
# In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months'
# worth of data, we can assess what historical interval(s) gives us the most useful, actionable data.
return state_dict[state]
def sanitize_ticket_states(state_dict):
"""
Converts timedelta strings back into timedeltas.
These were explicitly serialized as '{0.days}:{0.seconds}'.format(tdelta)
"""
result = {}
for state, tdelta in state_dict.items():
# Accept either the documented "days:seconds" string or an already-split (days, seconds) pair.
days, seconds = tdelta.split(":") if isinstance(tdelta, str) else (tdelta[0], tdelta[1])
result[state] = datetime.timedelta(days=int(days), seconds=int(seconds))
return result
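# Example (hedged), matching the serialisation format described in the docstring:
#     sanitize_ticket_states({"Needs Triage": "3:7200"})
#     -> {"Needs Triage": datetime.timedelta(days=3, seconds=7200)}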
def parse_jira_info(debug=False, pretty=False):
"""
Read in and parse states.json
Converts json representations of time to datetime objects, then returns a list of
ticket dictionaries.
"""
with open("states.json") as state_file:
# tickets is a list composed of state dictionaries for each ospr ticket.
# Keys are: 'issue' -> string, 'states' -> dict, 'labels' -> list,
# Optional keys are: 'resolution' -> list, 'debug' -> string, 'error' -> string
tickets = json.load(state_file)
# Go through tickets and sanitize data, report errors, etc
for ticket in tickets:
# Report any errors / debug messages
if ticket.get('error', False):
print("Error in ticket {}: {}".format(ticket['issue'], ticket['error']))
if debug and ticket.get('debug', False):
print("Debug: ticket {}: {}".format(ticket['issue'], ticket['debug']))
# Deal with "resolved" datetime
if ticket.get('resolved', False):
# Turn str(datetime) back into a datetime object
ticket['resolved'] = dateutil.parser.parse(ticket['resolved'])
else:
# Ticket is not yet resolved. Set "resolved" date to right now, so it'll
# show up in the filter for being resolved within the past X days (hack for cleaner code)
ticket['resolved'] = datetime.datetime.now()
# Sanitize ticket state dict (need to convert time strings to timedeltas)
if ticket.get('states', False):
ticket['states'] = sanitize_ticket_states(ticket['states'])
else:
# This shouldn't happen so something's going wrong
print("No states for ticket {}".format(ticket['issue']))
return tickets
def get_time_lists(tickets, num_past_days=0):
"""
Iterates over tickets, collecting lists of how much time was spent in various states.
Returns: dictionary of {'name': [datetime.timedelta,]}
- Time each ticket spent in all engineering states
- Time each ticket spent in triage
- Time each ticket spent in product review
- Time each ticket spent in team backlogs
"""
# Set up vars
eng_time_spent, triage_time_spent, product_time, backlog_time = [], [], [], []
date_x_days_ago = datetime.datetime.now() - datetime.timedelta(days=num_past_days)
# Go through tickets again, gathering and reporting information
for ticket in tickets:
# If we're restricting to past days, and the ticket was resolved longer ago
# than our limit, skip it.
if num_past_days > 0 and ticket['resolved'] < date_x_days_ago:
continue
# Get amount of time this spent in "Needs Triage" (roughly, time to first response)
triage_time_spent.append(single_state_time_spent(ticket['states'], 'Needs Triage'))
# Calculate total time spent by engineering team on this ticket
eng_time_spent.append(engineering_time_spent(ticket['states']))
# Get time spent in backlog
if ticket['states'].get('Awaiting Prioritization', False):
backlog_time.append(single_state_time_spent(ticket['states'], 'Awaiting Prioritization'))
# Get time spent in product review
if ticket['states'].get('Product Review', False):
product_time.append(single_state_time_spent(ticket['states'], 'Product Review'))
result = OrderedDict()
list_order = ['edX engineering states', 'Needs Triage', 'Product Review', 'Team Backlog']
lists = (eng_time_spent, triage_time_spent, product_time, backlog_time)
for name, lst in zip(list_order, lists):
result[name] = lst
return result
def avg_time_spent(time_spent):
"""
Returns the average time spent over the number of tickets.
"""
# Can't use numpy or other standards because sum() won't work with
# a list of datetime.timedeltas
return reduce(operator.add, time_spent, datetime.timedelta(0)) / len(time_spent)
def std_dev(time_spent):
"""
Standard deviation of the list.
Calculation follows formula std = sqrt(mean( (x - x.mean())**2 ) )
"""
avg = avg_time_spent(time_spent)
summation = 0
for sample in time_spent:
diff = sample - avg
summation += diff.total_seconds()**2
variance = summation / len(time_spent)
std = int(numpy.sqrt(variance))
return datetime.timedelta(seconds=std)
def make_percentile(qper):
"""
Returns a percentile function for the given numeric qper
qper: Float in range of [0,100]. Percentile to compute which must be
between 0 and 100 inclusive.
"""
def percentile(time_spent):
"""
Returns the qth percentile of the tickets
"""
seconds_spent = [delta.total_seconds() for delta in time_spent]  # numpy.percentile needs a concrete sequence, not a lazy map object
raw_result = numpy.percentile(seconds_spent, qper)
return datetime.timedelta(seconds=raw_result)
return percentile
def pretty_print_time(time, message=None):
"""Pretty print the given time"""
days = time.days
hours, remainder = divmod(time.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if message is not None:
print(message)
print(f'\t {days} days, {hours} hours, {minutes} minutes, {seconds} seconds')
# Print out fractional days things have been in this state
total = days + hours/24. + minutes/(24.*60) + seconds/(24.*3600)
return f"{total:.1f}"
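# Example (hedged): pretty_print_time(datetime.timedelta(days=1, hours=12), "Average time spent")
# prints the labelled days/hours/minutes/seconds breakdown and returns "1.5" (fractional days).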
def get_stats(time_spent, functions, pretty=False):
"""
Given a list of times and a list of stats functions, prints out all the
stats over the list (optionally in a pretty format)
"""
header = ''
results = ''
for func, fname in functions:
output = func(time_spent)
if pretty:
msg = f'{fname} time spent'
pretty_print_time(output, msg)
else:
# build up a string to print out.
header += f"{fname}\t|"
results += "{}\t ".format(pretty_print_time(output))
if not pretty:
print(header + 'Num tickets')
print(results + '{}\n'.format(len(time_spent)))
def main(argv):
"""a docstring for main, really?"""
parser = argparse.ArgumentParser(description="Summarize JIRA info.")
parser.add_argument(
"--no-scrape", action="store_true",
help="Don't re-run the scraper, just read the current states.json file"
)
parser.add_argument(
"--since", metavar="DAYS", type=int, default=0,
help="Only consider unresolved PRs & PRs closed in the past DAYS days"
)
parser.add_argument(
"--debug", action="store_true",
help="Show debugging messages"
)
parser.add_argument(
"--pretty", action="store_true",
help="Pretty print output"
)
parser.add_argument(
"--average", action="store_true",
help="Print out the average time spent in each of 4 states"
)
parser.add_argument(
"--median", action="store_true",
help="Print out the median time spent in each of 4 states"
)
parser.add_argument(
"--percentile", type=float,
help="Print out the qth percentile of all tickets in each state"
)
parser.add_argument(
"--std-dev", action="store_true",
help="Print out the standard deviation across the data"
)
parser.add_argument(
"--max", action="store_true",
help="Show the maximum time in the series"
)
parser.add_argument(
"--min", action="store_true",
help="Show the minimum time in the series"
)
parser.add_argument(
"--all", action="store_true",
help="Show all statistics"
)
args = parser.parse_args(argv[1:])
# Parse out what functions we want to gather for this report
functions = []
if args.average or args.all:
functions.append((avg_time_spent, 'Average'))
if args.median or args.all:
median_time_spent = make_percentile(50)
functions.append((median_time_spent, 'Median'))
if args.percentile or args.all:
pnum = args.percentile or 95
pfunc = make_percentile(pnum)
functions.append((pfunc, f'{pnum}%'))
if args.std_dev or args.all:
functions.append((std_dev, 'StdDev'))
if args.max or args.all:
functions.append((lambda lst: max(lst), 'Max'))
if args.min or args.all:
functions.append((lambda lst: min(lst), 'Min'))
if len(functions) == 0:
print("Alert: No statistical functions specified. Please use '--help' to see which are available, or use '--all' to run all.")
return
# Scrape jira unless told otherwise
if not args.no_scrape:
scrape_jira()
# Parse states.json into times list
tickets = parse_jira_info(args.debug, args.pretty)
# Gets {'list name': list}
ticket_lists = get_time_lists(tickets, args.since)
for list_name, time_spent in ticket_lists.items():
print("-" * 40)
num_tix = len(time_spent)
print(f"Statistics for '{list_name}', over {num_tix} tickets")
print("-" * 40)
get_stats(time_spent, functions, args.pretty)
if __name__ == "__main__":
sys.exit(main(sys.argv))
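# Example invocation (hedged):
#   python transitions_kpi.py --no-scrape --since 30 --average --median --percentile 95
# summarises tickets still open or resolved in the last 30 days without re-running the
# scrapy spider, printing Average, Median and 95% columns for each state list.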
|
#!/usr/bin/env python
"""
Scrapes and parses information from JIRA's transition states.
Runs the JIRA spider, then parses the output states.json
file to obtain KPI information.
See https://openedx.atlassian.net/wiki/display/OPEN/Tracking+edX+Commitment+To+OSPRs
"""
from collections import OrderedDict
from functools import reduce
from subprocess import check_call
import argparse
import datetime
import dateutil.parser
import json
import numpy
import operator
import sys
EDX_ENGINEERING_STATES = [
'Needs Triage',
'Product Review',
'Community Manager Review',
'Awaiting Prioritization',
'Engineering Review',
]
def scrape_jira():
"""
Re-scrapes jira into states.json
"""
# Delete content of states.json before re-writing
with open("states.json", "w"):
pass
print("Running scrapy spider over JIRA...")
check_call("scrapy runspider jiraspider.py -o states.json".split(" "))
print("-" * 20)
def engineering_time_spent(state_dict):
"""
Given a ticket's state dictionary, returns how much engineering time was spent on it.
Engineering states determined by EDX_ENGINEERING_STATES list.
"""
# Measurement 1: Average Time Spent by edX Engineering
# This measurement will sum up the amount of time it takes the engineering team to process OSPR work.
# AverageTime = sum(amount of time a ticket spends in edX states) / count(all tickets)
# This will be a rolling average over all tickets currently open, or closed in the past X days.
# In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months'
# worth of data, we can assess what historical interval(s) gives us the most useful, actionable data.
# This is a measurement across all of engineering. We are not proposing to measure teams individually.
total_time = datetime.timedelta(0)
for state, tdelta in state_dict.items():
if state in EDX_ENGINEERING_STATES:
total_time += tdelta
return total_time
def single_state_time_spent(state_dict, state):
"""
Given a ticket's state dictionary, returns how much time it spent
in the given `state`.
Assumes state_dict has the key `state` present.
"""
# Measurement 2: Average Time Spent in Scrum Team Backlog
# For the PRs that need to be reviewed by a scrum team, obtain an average of how long a ticket spends in a team backlog.
# AverageBacklog = sum(amount of time a ticket spends in "Awaiting Prioritization") /
# count(tickets with a non-zero amount of time spent in "Awaiting Prioritization")
# This will be a rolling average over all tickets currently open, or closed in the past X days.
# In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months'
# worth of data, we can assess what historical interval(s) gives us the most useful, actionable data.
return state_dict[state]
def sanitize_ticket_states(state_dict):
"""
Converts timedelta strings back into timedeltas.
These were explicitly serialized as '{0.days}:{0.seconds}'.format(tdelta)
"""
result = {}
for state, tdelta in state_dict.items():
# Accept either the documented "days:seconds" string or an already-split (days, seconds) pair.
days, seconds = tdelta.split(":") if isinstance(tdelta, str) else (tdelta[0], tdelta[1])
result[state] = datetime.timedelta(days=int(days), seconds=int(seconds))
return result
def parse_jira_info(debug=False, pretty=False):
"""
Read in and parse states.json
Converts json representations of time to datetime objects, then returns a list of
ticket dictionaries.
"""
with open("states.json") as state_file:
# tickets is a list composed of state dictionaries for each ospr ticket.
# Keys are: 'issue' -> string, 'states' -> dict, 'labels' -> list,
# Optional keys are: 'resolution' -> list, 'debug' -> string, 'error' -> string
tickets = json.load(state_file)
# Go through tickets and sanitize data, report errors, etc
for ticket in tickets:
# Report any errors / debug messages
if ticket.get('error', False):
print("Error in ticket {}: {}".format(ticket['issue'], ticket['error']))
if debug and ticket.get('debug', False):
print("Debug: ticket {}: {}".format(ticket['issue'], ticket['debug']))
# Deal with "resolved" datetime
if ticket.get('resolved', False):
# Turn str(datetime) back into a datetime object
ticket['resolved'] = dateutil.parser.parse(ticket['resolved'])
else:
# Ticket is not yet resolved. Set "resolved" date to right now, so it'll
# show up in the filter for being resolved within the past X days (hack for cleaner code)
ticket['resolved'] = datetime.datetime.now()
# Sanitize ticket state dict (need to convert time strings to timedeltas)
if ticket.get('states', False):
ticket['states'] = sanitize_ticket_states(ticket['states'])
else:
# This shouldn't happen so something's going wrong
print("No states for ticket {}".format(ticket['issue']))
return tickets
def get_time_lists(tickets, num_past_days=0):
"""
Iterates over tickets, collecting lists of how much time was spent in various states.
Returns: dictionary of {'name': [datetime.timedelta,]}
- Time each ticket spent in all engineering states
- Time each ticket spent in triage
- Time each ticket spent in product review
- Time each ticket spent in team backlogs
"""
# Set up vars
eng_time_spent, triage_time_spent, product_time, backlog_time = [], [], [], []
date_x_days_ago = datetime.datetime.now() - datetime.timedelta(days=num_past_days)
# Go through tickets again, gathering and reporting information
for ticket in tickets:
# If we're restricting to past days, and the ticket was resolved longer ago
# than our limit, skip it.
if num_past_days > 0 and ticket['resolved'] < date_x_days_ago:
continue
# Get amount of time this spent in "Needs Triage" (roughly, time to first response)
triage_time_spent.append(single_state_time_spent(ticket['states'], 'Needs Triage'))
# Calculate total time spent by engineering team on this ticket
eng_time_spent.append(engineering_time_spent(ticket['states']))
# Get time spent in backlog
if ticket['states'].get('Awaiting Prioritization', False):
backlog_time.append(single_state_time_spent(ticket['states'], 'Awaiting Prioritization'))
# Get time spent in product review
if ticket['states'].get('Product Review', False):
product_time.append(single_state_time_spent(ticket['states'], 'Product Review'))
result = OrderedDict()
list_order = ['edX engineering states', 'Needs Triage', 'Product Review', 'Team Backlog']
lists = (eng_time_spent, triage_time_spent, product_time, backlog_time)
for name, lst in zip(list_order, lists):
result[name] = lst
return result
def avg_time_spent(time_spent):
"""
Returns the average time spent over the number of tickets.
"""
# Can't use numpy or other standards because sum() won't work with
# a list of datetime.timedeltas
return reduce(operator.add, time_spent, datetime.timedelta(0)) / len(time_spent)
def std_dev(time_spent):
"""
Standard deviation of the list.
Calculation follows formula std = sqrt(mean( (x - x.mean())**2 ) )
"""
avg = avg_time_spent(time_spent)
summation = 0
for sample in time_spent:
diff = sample - avg
summation += diff.total_seconds()**2
variance = summation / len(time_spent)
std = int(numpy.sqrt(variance))
return datetime.timedelta(seconds=std)
def make_percentile(qper):
"""
Returns a percentile function for the given numeric qper
qper: Float in range of [0,100]. Percentile to compute which must be
between 0 and 100 inclusive.
"""
def percentile(time_spent):
"""
Returns the qth percentile of the tickets
"""
seconds_spent = [delta.total_seconds() for delta in time_spent]  # numpy.percentile needs a concrete sequence, not a lazy map object
raw_result = numpy.percentile(seconds_spent, qper)
return datetime.timedelta(seconds=raw_result)
return percentile
def pretty_print_time(time, message=None):
"""Pretty print the given time"""
days = time.days
hours, remainder = divmod(time.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if message is not None:
print(message)
print(f'\t {days} days, {hours} hours, {minutes} minutes, {seconds} seconds')
# Print out fractional days things have been in this state
total = days + hours/24. + minutes/(24.*60) + seconds/(24.*3600)
return f"{total:.1f}"
def get_stats(time_spent, functions, pretty=False):
"""
Given a list of times and a list of stats functions, prints out all the
stats over the list (optionally in a pretty format)
"""
header = ''
results = ''
for func, fname in functions:
output = func(time_spent)
if pretty:
msg = f'{fname} time spent'
pretty_print_time(output, msg)
else:
# build up a string to print out.
header += f"{fname}\t|"
results += "{}\t ".format(pretty_print_time(output))
if not pretty:
print(header + 'Num tickets')
print(results + '{}\n'.format(len(time_spent)))
def main(argv):
"""a docstring for main, really?"""
parser = argparse.ArgumentParser(description="Summarize JIRA info.")
parser.add_argument(
"--no-scrape", action="store_true",
help="Don't re-run the scraper, just read the current states.json file"
)
parser.add_argument(
"--since", metavar="DAYS", type=int, default=0,
help="Only consider unresolved PRs & PRs closed in the past DAYS days"
)
parser.add_argument(
"--debug", action="store_true",
help="Show debugging messages"
)
parser.add_argument(
"--pretty", action="store_true",
help="Pretty print output"
)
parser.add_argument(
"--average", action="store_true",
help="Print out the average time spent in each of 4 states"
)
parser.add_argument(
"--median", action="store_true",
help="Print out the median time spent in each of 4 states"
)
parser.add_argument(
"--percentile", type=float,
help="Print out the qth percentile of all tickets in each state"
)
parser.add_argument(
"--std-dev", action="store_true",
help="Print out the standard deviation across the data"
)
parser.add_argument(
"--max", action="store_true",
help="Show the maximum time in the series"
)
parser.add_argument(
"--min", action="store_true",
help="Show the minimum time in the series"
)
parser.add_argument(
"--all", action="store_true",
help="Show all statistics"
)
args = parser.parse_args(argv[1:])
# Parse out what functions we want to gather for this report
functions = []
if args.average or args.all:
functions.append((avg_time_spent, 'Average'))
if args.median or args.all:
median_time_spent = make_percentile(50)
functions.append((median_time_spent, 'Median'))
if args.percentile or args.all:
pnum = args.percentile or 95
pfunc = make_percentile(pnum)
functions.append((pfunc, f'{pnum}%'))
if args.std_dev or args.all:
functions.append((std_dev, 'StdDev'))
if args.max or args.all:
functions.append((lambda lst: max(lst), 'Max'))
if args.min or args.all:
functions.append((lambda lst: min(lst), 'Min'))
if len(functions) == 0:
print("Alert: No statistical functions specified. Please use '--help' to see which are available, or use '--all' to run all.")
return
# Scrape jira unless told otherwise
if not args.no_scrape:
scrape_jira()
# Parse states.json into times list
tickets = parse_jira_info(args.debug, args.pretty)
# Gets {'list name': list}
ticket_lists = get_time_lists(tickets, args.since)
for list_name, time_spent in ticket_lists.items():
print("-" * 40)
num_tix = len(time_spent)
print(f"Statistics for '{list_name}', over {num_tix} tickets")
print("-" * 40)
get_stats(time_spent, functions, args.pretty)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| en | 0.901261 | #!/usr/bin/env python Scrapes and parses information from JIRA's transition states. Runs the JIRA spider, then parses the output states.json file to obtain KPI information. See https://openedx.atlassian.net/wiki/display/OPEN/Tracking+edX+Commitment+To+OSPRs Re-scrapes jira into states.json # Delete content of states.json before re-writing Given a ticket's state dictionary, returns how much engineering time was spent on it. Engineering states determined by EDX_ENGINEERING_STATES list. # Measurement 1: Average Time Spent by edX Engineering # This measurement will sum up the amount of time it takes the engineering team to process OSPR work. # AverageTime = sum(amount of time a ticket spends in edX states) / count(all tickets) # This will be a rolling average over all tickets currently open, or closed in the past X days. # In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months' # worth of data, we can assess what historical interval(s) gives us the most useful, actionable data. # This is a measurement across all of engineering. We are not proposing to measure teams individually. Given a ticket's state dictionary, returns how much time it spent in the given `state`. Assumes state_dict has the key `state` present. # Measurement 2: Average Time Spent in Scrum Team Backlog # For the PRs that need to be reviewed by a scrum team, obtain an average of how long a ticket spends in a team backlog. # AverageBacklog = sum(amount of time a ticket spends in "Awaiting Prioritization") / # count(tickets with a non-zero amount of time spent in "Awaiting Prioritization") # This will be a rolling average over all tickets currently open, or closed in the past X days. # In the initial rollout of this measurement, we'll track for X=14, 30, and 60 days. After we have a few months' # worth of data, we can assess what historical interval(s) gives us the most useful, actionable data. Converts timedelta strings back into timedeltas. These were explicitly serialized as '{0.days}:{0.seconds}'.format(tdelta) Read in and parse states.json Converts json representations of time to datetime objects, then returns a list of ticket dictionaries. # tickets is a list composed of state dictionaries for each ospr ticket. # Keys are: 'issue' -> string, 'states' -> dict, 'labels' -> list, # Optional keys are: 'resolution' -> list, 'debug' -> string, 'error' -> string # Go through tickets and sanitize data, report errors, etc # Report any errors / debug messages # Deal with "resolved" datetime # Turn str(datetime) back into a datetime object # Ticket is not yet resolved. Set "resolved" date to right now, so it'll # show up in the filter for being resolved within the past X days (hack for cleaner code) # Sanitize ticket state dict (need to convert time strings to timedeltas) # This shouldn't happen so something's going wrong Iterates over tickets, collecting lists of how much time was spent in various states. Returns: dictionary of {'name': [datetime.timedelta,]} - Time each ticket spent in all engineering states - Time each ticket spent in triage - Time each ticket spent in product review - Time each ticket spent in team backlogs # Set up vars # Go through tickets again, gathering and reporting information # If we're restricting to past days, and the ticket was resolved longer ago # than our limit, skip it. 
# Get amount of time this spent in "Needs Triage" (roughly, time to first response) # Calculate total time spent by engineering team on this ticket # Get time spent in backlog # Get time spent in product review Returns the average time spent over the number of tickets. # Can't use numpy or other standards because sum() won't work with # a list of datetime.timedeltas Standard deviation of the list. Calculation follows formula std = sqrt(mean( (x - x.mean())**2 ) ) Returns a percentile function for the given numeric qper qper: Float in range of [0,100]. Percentile to compute which must be between 0 and 100 inclusive. Returns the qth percentile of the tickets Pretty print the given time # Print out fractional days things have been in this state Given a list of times and a list of stats functions, prints out all the stats over the list (optionally in a pretty format) # build up a string to print out. a docstring for main, really? # Parse out what functions we want to gather for this report # Scrape jira unless told otherwise # Parse states.json into times list # Gets {'list name': list} | 2.784524 | 3 |
reviews/views.py | n3trob3/nimrodage | 0 | 6633129 | from django.shortcuts import render
from django.views.generic import ListView
from .models import Reviews
from Home.models import Service, Industry
# Create your views here.
class ReviewPage(ListView):
model = Reviews
context_object_name = 'reviews'
template_name = 'reviews/reviews.html'
def get_context_data(self):
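        # Extend the default context with the page title and the full Service/Industry querysets for the templates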
context = super(ReviewPage, self).get_context_data()
        context['title'] = 'Reviews'
        context['services'] = Service.objects.all()
        context['industries'] = Industry.objects.all()
return context
| from django.shortcuts import render
from django.views.generic import ListView
from .models import Reviews
from Home.models import Service, Industry
# Create your views here.
class ReviewPage(ListView):
model = Reviews
context_object_name = 'reviews'
template_name = 'reviews/reviews.html'
def get_context_data(self):
context = super(ReviewPage, self).get_context_data()
        context['title'] = 'Reviews'
        context['services'] = Service.objects.all()
        context['industries'] = Industry.objects.all()
return context
| en | 0.968116 | # Create your views here. | 1.959507 | 2 |
utils/help.py | JakeWasChosen/edoC | 1 | 6633130 | <gh_stars>1-10
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. <NAME> +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import inspect
import itertools
from typing import List, Dict, Any, Optional, Union
import discord
from discord.ext import commands, menus
from discord.utils import format_dt
from utils.pagination import edoCPages
from utils.vars import *
# from lib.db import db
"""
def get_prefix(bot, message):
try:
if not message.guild:
return config["default_prefix"]
else:
prefix = db.field("SELECT Prefix FROM guilds WHERE GuildID = ?", message.guild.id)
if prefix:
return commands.when_mentioned_or(prefix)(bot, message)
else:
return config["default_prefix"]
except AttributeError:
return config["default_prefix"]
class HelpFormat(DefaultHelpCommand):
def get_destination(self, no_pm: bool = False):
if no_pm:
return self.context.channel
else:
return self.context.author
async def send_error_message(self, error):
destination = self.get_destination(no_pm=True)
await destination.send(error)
async def send_command_help(self, command):
self.add_command_formatting(command)
self.paginator.close_page()
await self.send_pages(no_pm=True)
async def send_pages(self, no_pm: bool = False):
try:
if permissions.can_handle(self.context, "add_reactions"):
await self.context.message.add_reaction(chr(0x2709))
except discord.Forbidden:
pass
try:
destination = self.get_destination(no_pm=no_pm)
for page in self.paginator.pages:
await destination.send(page)
except discord.Forbidden:
destination = self.get_destination(no_pm=True)
await destination.send("Couldn't send help to you due to blocked DMs...")
"""
class GroupHelpPageSource(menus.ListPageSource):
def __init__(self, group: Union[commands.Group, commands.Cog], commands: List[commands.Command], *, prefix: str):
super().__init__(entries=commands, per_page=6)
self.group = group
self.prefix = prefix
self.title = f'{self.group.qualified_name} Commands'
self.description = self.group.description
async def format_page(self, menu, commands):
embed = discord.Embed(title=self.title, description=self.description, colour=discord.Colour(0xA8B9CD))
for command in commands:
signature = f'{command.qualified_name} {command.signature}'
embed.add_field(name=signature, value=command.short_doc or 'No help given...', inline=False)
maximum = self.get_max_pages()
if maximum > 1:
embed.set_author(name=f'Page {menu.current_page + 1}/{maximum} ({len(self.entries)} commands)')
embed.set_footer(text=f'Use "{self.prefix}help command" for more info on a command.')
return embed
class HelpSelectMenu(discord.ui.Select['HelpMenu']):
def __init__(self, commands: Dict[commands.Cog, List[commands.Command]], bot: commands.AutoShardedBot):
super().__init__(
placeholder='Select a category...',
min_values=1,
max_values=1,
row=0,
)
self.commands = commands
self.bot = bot
self.__fill_options()
def __fill_options(self) -> None:
self.add_option(
label='Index',
emoji='\N{WAVING HAND SIGN}',
value='__index',
description='The help page showing how to use the bot.',
)
for cog, commands in self.commands.items():
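            # Add one dropdown option per cog, skipping cogs without any commands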
if not commands:
continue
description = cog.description.split('\n', 1)[0] or None
emoji = getattr(cog, 'display_emoji', None)
self.add_option(label=cog.qualified_name, value=cog.qualified_name, description=description, emoji=emoji)
async def callback(self, interaction: discord.Interaction):
assert self.view is not None
value = self.values[0]
if value == '__index':
await self.view.rebind(FrontPageSource(), interaction)
else:
cog = self.bot.get_cog(value)
if cog is None:
await interaction.response.send_message('Somehow this category does not exist?', ephemeral=True)
return
commands = self.commands[cog]
if not commands:
await interaction.response.send_message('This category has no commands for you', ephemeral=True)
return
source = GroupHelpPageSource(cog, commands, prefix=self.view.ctx.clean_prefix)
await self.view.rebind(source, interaction)
class HelpMenu(edoCPages):
def __init__(self, source: menus.PageSource, ctx):
super().__init__(source, ctx=ctx, compact=True)
def add_categories(self, commands: Dict[commands.Cog, List[commands.Command]]) -> None:
self.clear_items()
self.add_item(HelpSelectMenu(commands, self.ctx.bot))
self.fill_items()
async def rebind(self, source: menus.PageSource, interaction: discord.Interaction) -> None:
self.source = source
self.current_page = 0
await self.source._prepare_once()
page = await self.source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
self._update_labels(0)
await interaction.response.edit_message(**kwargs, view=self)
class FrontPageSource(menus.PageSource):
def is_paginating(self) -> bool:
# This forces the buttons to appear even in the front page
return True
def get_max_pages(self) -> Optional[int]:
# There's only one actual page in the front page
# However we need at least 2 to show all the buttons
return 2
async def get_page(self, page_number: int) -> Any:
# The front page is a dummy
self.index = page_number
return self
def format_page(self, menu: HelpMenu, page):
embed = discord.Embed(title='Bot Help', colour=discord.Colour(0xA8B9CD))
embed.description = inspect.cleandoc(
f"""
Hello! Welcome to the help page.
Use "{menu.ctx.clean_prefix}help command" for more info on a command.
Use "{menu.ctx.clean_prefix}help category" for more info on a category.
Use the dropdown menu below to select a category.
"""
)
embed.add_field(
name='Support Server',
value='For more help, consider joining the official server over at https://discord.gg/6EFAqm5aSG',
inline=False,
)
created_at = format_dt(menu.ctx.bot.user.created_at, 'F')
if self.index == 0:
embed.add_field(
name='Who are you?',
value=(
"I'm a bot made by Jake CEO of annoyance#1904.I've been running since "
f'{created_at}. I have features such as moderation, fun, info, and more. You can get more '
'information on my commands by using the dropdown below.\n\n'
"I'm also open source. You can see my code on [GitHub](https://github.com/JakeWasChosen/edoC)!"
),
inline=False,
)
elif self.index == 1:
entries = (
('<argument>', 'This means the argument is __**required**__.'),
('[argument]', 'This means the argument is __**optional**__.'),
('[A|B]', 'This means that it can be __**either A or B**__.'),
(
'[argument...]',
'This means you can have multiple arguments.\n'
'Now that you know the basics, it should be noted that...\n'
'__**You do not type in the brackets!**__',
),
)
embed.add_field(name='How do I use this bot?', value='Reading the bot signature is pretty simple.')
for name, value in entries:
embed.add_field(name=name, value=value, inline=False)
return embed
class PaginatedHelpCommand(commands.HelpCommand):
def __init__(self):
super().__init__(
command_attrs={
'cooldown': commands.CooldownMapping.from_cooldown(1, 3.0, commands.BucketType.member),
'help': 'Shows help about the bot, a command, or a category',
}
)
async def on_help_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
# Ignore missing permission errors
if isinstance(error.original, discord.HTTPException) and error.original.code == 50013:
return
await ctx.send(str(error.original))
def get_command_signature(self, command):
parent = command.full_parent_name
if len(command.aliases) > 0:
aliases = '|'.join(command.aliases)
fmt = f'[{command.name}|{aliases}]'
if parent:
fmt = f'{parent} {fmt}'
alias = fmt
else:
alias = command.name if not parent else f'{parent} {command.name}'
return f'{alias} {command.signature}'
async def send_bot_help(self, mapping):
bot = self.context.bot
def key(command) -> str:
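            # Commands without a cog sort last via the U+10FFFF sentinel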
cog = command.cog
return cog.qualified_name if cog else '\U0010ffff'
entries: List[commands.Command] = await self.filter_commands(bot.commands, sort=True, key=key)
all_commands: Dict[commands.Cog, List[commands.Command]] = {}
for name, children in itertools.groupby(entries, key=key):
if name == '\U0010ffff':
continue
cog = bot.get_cog(name)
all_commands[cog] = sorted(children, key=lambda c: c.qualified_name)
menu = HelpMenu(FrontPageSource(), ctx=self.context)
menu.add_categories(all_commands)
await menu.start()
async def send_cog_help(self, cog):
entries = await self.filter_commands(cog.get_commands(), sort=True)
menu = HelpMenu(GroupHelpPageSource(cog, entries, prefix=self.context.clean_prefix), ctx=self.context)
await menu.start()
def common_command_formatting(self, embed_like, command):
embed_like.title = self.get_command_signature(command)
if command.description:
embed_like.description = f'{command.description}\n\n{command.help}'
else:
embed_like.description = command.help or 'No help found...'
async def send_command_help(self, command):
# No pagination necessary for a single command.
embed = discord.Embed(colour=discord.Colour(0xA8B9CD))
self.common_command_formatting(embed, command)
await self.context.send(embed=embed)
async def send_group_help(self, group):
subcommands = group.commands
if len(subcommands) == 0:
return await self.send_command_help(group)
entries = await self.filter_commands(subcommands, sort=True)
if len(entries) == 0:
return await self.send_command_help(group)
source = GroupHelpPageSource(group, entries, prefix=self.context.clean_prefix)
self.common_command_formatting(source, group)
menu = HelpMenu(source, ctx=self.context)
await menu.start()
class MyNewHelp(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
try:
for page in self.paginator.pages:
emby = discord.Embed(description=page, color=random_color())
await destination.send(embed=emby)
except discord.Forbidden:
await destination.send("Couldn't send help to you due to blocked DMs...")
async def send_error_message(self, err):
embed = discord.Embed(title="Error", description=err, color=error)
channel = self.get_destination()
await channel.send(embed=embed)
| # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. <NAME> +
# All rights reserved. +
# This file is part of the edoC discord bot project , +
# and is released under the "MIT License Agreement". Please see the LICENSE +
# file that should have been included as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import inspect
import itertools
from typing import List, Dict, Any, Optional, Union
import discord
from discord.ext import commands, menus
from discord.utils import format_dt
from utils.pagination import edoCPages
from utils.vars import *
# from lib.db import db
"""
def get_prefix(bot, message):
try:
if not message.guild:
return config["default_prefix"]
else:
prefix = db.field("SELECT Prefix FROM guilds WHERE GuildID = ?", message.guild.id)
if prefix:
return commands.when_mentioned_or(prefix)(bot, message)
else:
return config["default_prefix"]
except AttributeError:
return config["default_prefix"]
class HelpFormat(DefaultHelpCommand):
def get_destination(self, no_pm: bool = False):
if no_pm:
return self.context.channel
else:
return self.context.author
async def send_error_message(self, error):
destination = self.get_destination(no_pm=True)
await destination.send(error)
async def send_command_help(self, command):
self.add_command_formatting(command)
self.paginator.close_page()
await self.send_pages(no_pm=True)
async def send_pages(self, no_pm: bool = False):
try:
if permissions.can_handle(self.context, "add_reactions"):
await self.context.message.add_reaction(chr(0x2709))
except discord.Forbidden:
pass
try:
destination = self.get_destination(no_pm=no_pm)
for page in self.paginator.pages:
await destination.send(page)
except discord.Forbidden:
destination = self.get_destination(no_pm=True)
await destination.send("Couldn't send help to you due to blocked DMs...")
"""
class GroupHelpPageSource(menus.ListPageSource):
def __init__(self, group: Union[commands.Group, commands.Cog], commands: List[commands.Command], *, prefix: str):
super().__init__(entries=commands, per_page=6)
self.group = group
self.prefix = prefix
self.title = f'{self.group.qualified_name} Commands'
self.description = self.group.description
async def format_page(self, menu, commands):
embed = discord.Embed(title=self.title, description=self.description, colour=discord.Colour(0xA8B9CD))
for command in commands:
signature = f'{command.qualified_name} {command.signature}'
embed.add_field(name=signature, value=command.short_doc or 'No help given...', inline=False)
maximum = self.get_max_pages()
if maximum > 1:
embed.set_author(name=f'Page {menu.current_page + 1}/{maximum} ({len(self.entries)} commands)')
embed.set_footer(text=f'Use "{self.prefix}help command" for more info on a command.')
return embed
class HelpSelectMenu(discord.ui.Select['HelpMenu']):
def __init__(self, commands: Dict[commands.Cog, List[commands.Command]], bot: commands.AutoShardedBot):
super().__init__(
placeholder='Select a category...',
min_values=1,
max_values=1,
row=0,
)
self.commands = commands
self.bot = bot
self.__fill_options()
def __fill_options(self) -> None:
self.add_option(
label='Index',
emoji='\N{WAVING HAND SIGN}',
value='__index',
description='The help page showing how to use the bot.',
)
for cog, commands in self.commands.items():
if not commands:
continue
description = cog.description.split('\n', 1)[0] or None
emoji = getattr(cog, 'display_emoji', None)
self.add_option(label=cog.qualified_name, value=cog.qualified_name, description=description, emoji=emoji)
async def callback(self, interaction: discord.Interaction):
assert self.view is not None
value = self.values[0]
if value == '__index':
await self.view.rebind(FrontPageSource(), interaction)
else:
cog = self.bot.get_cog(value)
if cog is None:
await interaction.response.send_message('Somehow this category does not exist?', ephemeral=True)
return
commands = self.commands[cog]
if not commands:
await interaction.response.send_message('This category has no commands for you', ephemeral=True)
return
source = GroupHelpPageSource(cog, commands, prefix=self.view.ctx.clean_prefix)
await self.view.rebind(source, interaction)
class HelpMenu(edoCPages):
def __init__(self, source: menus.PageSource, ctx):
super().__init__(source, ctx=ctx, compact=True)
def add_categories(self, commands: Dict[commands.Cog, List[commands.Command]]) -> None:
self.clear_items()
self.add_item(HelpSelectMenu(commands, self.ctx.bot))
self.fill_items()
async def rebind(self, source: menus.PageSource, interaction: discord.Interaction) -> None:
self.source = source
self.current_page = 0
await self.source._prepare_once()
page = await self.source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
self._update_labels(0)
await interaction.response.edit_message(**kwargs, view=self)
class FrontPageSource(menus.PageSource):
def is_paginating(self) -> bool:
# This forces the buttons to appear even in the front page
return True
def get_max_pages(self) -> Optional[int]:
# There's only one actual page in the front page
# However we need at least 2 to show all the buttons
return 2
async def get_page(self, page_number: int) -> Any:
# The front page is a dummy
self.index = page_number
return self
def format_page(self, menu: HelpMenu, page):
embed = discord.Embed(title='Bot Help', colour=discord.Colour(0xA8B9CD))
embed.description = inspect.cleandoc(
f"""
Hello! Welcome to the help page.
Use "{menu.ctx.clean_prefix}help command" for more info on a command.
Use "{menu.ctx.clean_prefix}help category" for more info on a category.
Use the dropdown menu below to select a category.
"""
)
embed.add_field(
name='Support Server',
value='For more help, consider joining the official server over at https://discord.gg/6EFAqm5aSG',
inline=False,
)
created_at = format_dt(menu.ctx.bot.user.created_at, 'F')
if self.index == 0:
embed.add_field(
name='Who are you?',
value=(
"I'm a bot made by Jake CEO of annoyance#1904.I've been running since "
f'{created_at}. I have features such as moderation, fun, info, and more. You can get more '
'information on my commands by using the dropdown below.\n\n'
"I'm also open source. You can see my code on [GitHub](https://github.com/JakeWasChosen/edoC)!"
),
inline=False,
)
elif self.index == 1:
entries = (
('<argument>', 'This means the argument is __**required**__.'),
('[argument]', 'This means the argument is __**optional**__.'),
('[A|B]', 'This means that it can be __**either A or B**__.'),
(
'[argument...]',
'This means you can have multiple arguments.\n'
'Now that you know the basics, it should be noted that...\n'
'__**You do not type in the brackets!**__',
),
)
embed.add_field(name='How do I use this bot?', value='Reading the bot signature is pretty simple.')
for name, value in entries:
embed.add_field(name=name, value=value, inline=False)
return embed
class PaginatedHelpCommand(commands.HelpCommand):
def __init__(self):
super().__init__(
command_attrs={
'cooldown': commands.CooldownMapping.from_cooldown(1, 3.0, commands.BucketType.member),
'help': 'Shows help about the bot, a command, or a category',
}
)
async def on_help_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
# Ignore missing permission errors
if isinstance(error.original, discord.HTTPException) and error.original.code == 50013:
return
await ctx.send(str(error.original))
def get_command_signature(self, command):
parent = command.full_parent_name
if len(command.aliases) > 0:
aliases = '|'.join(command.aliases)
fmt = f'[{command.name}|{aliases}]'
if parent:
fmt = f'{parent} {fmt}'
alias = fmt
else:
alias = command.name if not parent else f'{parent} {command.name}'
return f'{alias} {command.signature}'
async def send_bot_help(self, mapping):
bot = self.context.bot
def key(command) -> str:
cog = command.cog
return cog.qualified_name if cog else '\U0010ffff'
entries: List[commands.Command] = await self.filter_commands(bot.commands, sort=True, key=key)
all_commands: Dict[commands.Cog, List[commands.Command]] = {}
for name, children in itertools.groupby(entries, key=key):
if name == '\U0010ffff':
continue
cog = bot.get_cog(name)
all_commands[cog] = sorted(children, key=lambda c: c.qualified_name)
menu = HelpMenu(FrontPageSource(), ctx=self.context)
menu.add_categories(all_commands)
await menu.start()
async def send_cog_help(self, cog):
entries = await self.filter_commands(cog.get_commands(), sort=True)
menu = HelpMenu(GroupHelpPageSource(cog, entries, prefix=self.context.clean_prefix), ctx=self.context)
await menu.start()
def common_command_formatting(self, embed_like, command):
embed_like.title = self.get_command_signature(command)
if command.description:
embed_like.description = f'{command.description}\n\n{command.help}'
else:
embed_like.description = command.help or 'No help found...'
async def send_command_help(self, command):
# No pagination necessary for a single command.
embed = discord.Embed(colour=discord.Colour(0xA8B9CD))
self.common_command_formatting(embed, command)
await self.context.send(embed=embed)
async def send_group_help(self, group):
subcommands = group.commands
if len(subcommands) == 0:
return await self.send_command_help(group)
entries = await self.filter_commands(subcommands, sort=True)
if len(entries) == 0:
return await self.send_command_help(group)
source = GroupHelpPageSource(group, entries, prefix=self.context.clean_prefix)
self.common_command_formatting(source, group)
menu = HelpMenu(source, ctx=self.context)
await menu.start()
class MyNewHelp(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
try:
for page in self.paginator.pages:
emby = discord.Embed(description=page, color=random_color())
await destination.send(embed=emby)
except discord.Forbidden:
await destination.send("Couldn't send help to you due to blocked DMs...")
async def send_error_message(self, err):
embed = discord.Embed(title="Error", description=err, color=error)
channel = self.get_destination()
await channel.send(embed=embed) | en | 0.589401 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Copyright (c) 2021. <NAME> + # All rights reserved. + # This file is part of the edoC discord bot project , + # and is released under the "MIT License Agreement". Please see the LICENSE + # file that should have been included as part of this package. + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # from lib.db import db def get_prefix(bot, message): try: if not message.guild: return config["default_prefix"] else: prefix = db.field("SELECT Prefix FROM guilds WHERE GuildID = ?", message.guild.id) if prefix: return commands.when_mentioned_or(prefix)(bot, message) else: return config["default_prefix"] except AttributeError: return config["default_prefix"] class HelpFormat(DefaultHelpCommand): def get_destination(self, no_pm: bool = False): if no_pm: return self.context.channel else: return self.context.author async def send_error_message(self, error): destination = self.get_destination(no_pm=True) await destination.send(error) async def send_command_help(self, command): self.add_command_formatting(command) self.paginator.close_page() await self.send_pages(no_pm=True) async def send_pages(self, no_pm: bool = False): try: if permissions.can_handle(self.context, "add_reactions"): await self.context.message.add_reaction(chr(0x2709)) except discord.Forbidden: pass try: destination = self.get_destination(no_pm=no_pm) for page in self.paginator.pages: await destination.send(page) except discord.Forbidden: destination = self.get_destination(no_pm=True) await destination.send("Couldn't send help to you due to blocked DMs...") # This forces the buttons to appear even in the front page # There's only one actual page in the front page # However we need at least 2 to show all the buttons # The front page is a dummy Hello! Welcome to the help page. Use "{menu.ctx.clean_prefix}help command" for more info on a command. Use "{menu.ctx.clean_prefix}help category" for more info on a category. Use the dropdown menu below to select a category. #1904.I've been running since " # Ignore missing permission errors # No pagination necessary for a single command. | 2.342016 | 2 |
tmt/steps/provision/vagrant.py | optak/tmt | 0 | 6633131 | # coding: utf-8
""" Provision Step Vagrnat Class """
import tmt
import subprocess
import os
import re
from tmt.steps.provision.base import ProvisionBase
from tmt.utils import ConvertError, SpecificationError, GeneralError
from click import echo
from shlex import quote
from urllib.parse import urlparse
# DATA[*]:
# HOW = libvirt|virtual|docker|container|vagrant|...
# provider, in Vagrant's terminology
#
# IMAGE = URI|NAME
# NAME is for Vagrant or other HOW, passed directly
# URI can be path to BOX, QCOW2 or Vagrantfile f.e.
#
# BOX = Set a BOX name directly (in case of URI for IMAGE)
#
class ProvisionVagrant(ProvisionBase):
""" Use Vagrant to Provision an environment for testing """
executable = 'vagrant'
config_prefix = ' config.'
sync_type = 'rsync'
default_image = 'fedora/31-cloud-base'
default_container = 'fedora:latest'
default_indent = 16
vf_name = 'Vagrantfile'
timeout = 333
eol = '\n'
## Default API ##
def __init__(self, data, step):
""" Initialize the Vagrant provision step """
self.super = super(ProvisionVagrant, self)
self.super.__init__(data, step)
self.vagrantfile = os.path.join(self.provision_dir, self.vf_name)
self.vf_data = ''
self.path = os.path.join(self.provision_dir, 'data.yaml')
# Check for working Vagrant
self.run_vagrant('version')
# Let's check what's needed
self.check_how()
def load(self):
""" Load ProvisionVagrant step """
raise SpecificationError("NYI: cannot load")
self.super.load()
def save(self):
""" Save ProvisionVagrant step """
raise SpecificationError("NYI: cannot save")
self.super.save()
def go(self):
""" Execute actual provisioning """
self.init()
self.info(f'Provisioning {self.executable}, {self.vf_name}', self.vf_read())
return self.run_vagrant('up')
def execute(self, *args, **kwargs):
""" Execute remote command """
return self.run_vagrant('ssh', '-c', self.join(args))
def show(self):
""" Create and show the Vagrantfile """
self.init()
self.super.show(keys=['how', 'box', 'image'])
self.info(self.vf_name, self.vf_read())
def sync_workdir_to_guest(self):
""" sync on demand """
return self.run_vagrant('rsync')
def sync_workdir_from_guest(self):
""" sync from guest to host """
command = 'rsync-back'
self.plugin_install(command)
return self.run_vagrant(command)
def copy_from_guest(self, target):
""" copy file/folder from guest to host's copy dir """
beg = f"[[ -d '{target}' ]]"
end = 'exit 0; set -xe; '
isdir = f"{beg} || {end}"
isntdir = f"{beg} && {end}"
target_dir = f'{self.provision_dir}/copy/{target}'
self.execute(isdir + self.cmd_mkcp(target_dir, f'{target}/.'))
target_dir = f'$(dirname "{self.provision_dir}/copy/{target}")'
self.execute(isntdir + self.cmd_mkcp(target_dir, target))
self.sync_workdir_from_guest()
def destroy(self):
""" remove instance """
return self.run_vagrant('destroy', '-f')
def prepare(self, how, what):
""" add single 'preparator' and run it """
name = 'prepare'
cmd = 'provision'
# TODO: FIX
whatpath = os.path.join(self.step.plan.workdir,
'discover',
'one',
'tests',
what)
self.debug('Trying path', whatpath)
if os.path.exists(whatpath) and os.path.isfile(whatpath):
what = whatpath
if how == 'ansible':
name = how
self.add_config_block(cmd,
name,
f'become = true',
f'become_user = "root"',
f'playbook = "{what}"')
#TODO: run: 'never'
else:
if self.is_uri(what):
method = 'path'
else:
method = 'inline'
self.add_config('vm',
cmd,
self.quote(name),
self.kv('type', how),
self.kv('privileged', 'true'),
self.kv('run', 'never'),
self.kv(method, what))
# maybe?
return self.run_vagrant(cmd, f'--{cmd}-with', name)
## Additional API ##
def init(self):
""" Initialize Vagrantfile """
self.info('Provision dir', self.provision_dir)
# Are we resuming?
if os.path.exists(self.vagrantfile) and os.path.isfile(self.vagrantfile):
self.validate()
return
# Create a Vagrantfile
self.create()
# Let's add what's needed
# Do this first to install provider
self.add_how()
# Add default entries to Vagrantfile
self.add_defaults()
def create(self):
""" Create default Vagrantfile with our modifications """
self.run_vagrant('init', '-fm', self.data['box'])
self.info('Initialized new Vagrantfile', self.vf_read())
def clean(self):
""" remove box and base box """
return self.run_vagrant('box', 'remove', '-f', self.data['box'])
# TODO: libvirt storage removal?
def validate(self):
""" Validate Vagrantfile format """
return self.run_vagrant('validate')
def reload(self):
""" restart guest machine """
return self.run_vagrant('reload')
def plugin_install(self, name):
""" Install a vagrant plugin if it's not installed yet.
"""
plugin = f'{self.executable}-{name}'
command = ['plugin', 'install']
try:
# is it already present?
return self.run(f"bash -c \"{self.executable} {command[0]} list | grep '^{plugin}'\"")
except GeneralError:
pass
try:
# try to install it
return self.run_vagrant(command[0], command[1], plugin)
except GeneralError as error:
# Let's work-around the error handling limitation for now
# by getting the output manually
command = ' '.join([self.executable] + command + [plugin])
out, err = self.run(f"bash -c \"{command}; :\"")
if re.search(r"Conflicting dependency chains:", err) is None:
raise error
raise ConvertError('Dependency conflict detected.\n'
'Please install vagrant plugins from one source only(hint: `dnf remove vagrant-libvirt`).')
## Knowhow ##
def check_how(self):
""" Decide what to do when HOW is ...
does not add anything into Vagrantfile yet
"""
self.debug('VagrantProvider', 'Checking initial status, setting defaults.')
self.set_default('how', 'virtual')
self.set_default('image', self.default_image)
image = self.data['image']
if self.is_uri(image):
self.set_default('box', 'box_' + self.instance_name)
if re.search(r"\.box$", image) is None:
# an actual box file, Great!
pass
elif re.search(r"\.qcow2$", image) is None:
# do some qcow2 magic
self.data['box'] = '...'
raise SpecificationError("NYI: QCOW2 image")
else:
raise SpecificationError(f"Image format not recognized: {image}")
else:
self.set_default('box', image)
self.data['image'] = None
for x in ('how', 'box', 'image'):
self.info(f' {x}', self.data[x])
def add_how(self):
""" Add provider (in Vagrant-speak) specifics """
getattr(self,
f"how_{self.data['how']}",
lambda: 'generic',
)()
def how_virtual(self):
self.debug("generating", "virtual")
image = self.data['image']
if image:
self.add_config_value('vm', "box_url", image)
# let's try libvirt as default for now
self.how_libvirt()
    def how_generic(self):
self.debug("generating", "generic")
self.add_provider(self.data['how'])
def how_libvirt(self):
name = 'libvirt'
self.debug("generating", name)
self.plugin_install(name)
self.add_provider(name, 'memory = 1024')
self.vf_backup("QEMU session")
try:
self.add_provider(name, 'qemu_use_session = true')
except GeneralError as error:
# Not really an error
#self.debug(error)
self.vf_restore()
def how_openstack(self):
self.debug("generating", "openstack")
raise SpecificationError('NYI: cannot currently run on openstack.')
def how_docker(self):
self.how_container()
def how_podman(self):
self.how_container()
def how_container(self):
self.debug("generating", "container")
raise SpecificationError('NYI: cannot currently run containers.')
## END of API ##
def vagrant_status(self):
""" Get vagrant's status """
raise ConvertError('NYI: cannot currently return status.')
# TODO: how to get stdout from self.run?
#csp = self.run_vagrant('status')
#return self.hr(csp.stdout)
def add_defaults(self):
""" Adds default config entries
1) Disable default sync
2) To sync plan workdir
3) setup ssh
4) memory: 1024
"""
self.add_synced_folder(".", "/vagrant", 'disabled: true')
dir = self.step.plan.workdir
self.add_synced_folder(dir, dir)
self.add_config_value('ssh', 'username', "root")
# probably not needed
#self.add_config_value('ssh', 'insert_key', 'true')
self.add_config_value('nfs', 'verify_installed', 'false')
def run_vagrant(self, *args):
""" Run vagrant command and raise an error if it fails
args = 'command args'
or
            args = ['command', 'args']
"""
if len(args) == 0:
raise RuntimeError("vagrant has to run with args")
elif len(args) == 1:
args = args[0]
cmd = self.prepend(args, self.executable)
# timeout = self.timeout,
return self.run(
cmd,
cwd = self.provision_dir)
def add_synced_folder(self, sync_from, sync_to, *args):
self.add_config('vm',
'synced_folder',
self.quote(sync_from),
self.quote(sync_to),
self.kv('type', self.sync_type),
*args)
def add_provider(self, provider, *config):
self.add_config_block('provider', provider, *config)
def add_config_block(self, name, block, *config):
""" Add config block into Vagrantfile
"""
config_str = ''
for c in config:
config_str += f'{block}.{c}; '
self.add_config('vm', f"{name} '{block}' do |{block}| {config_str}end")
def add_config_value(self, type, key, value):
""" Add config = value into Vagrantfile
"""
self.add_config(type, f"{key} = '{value}'")
def add_config(self, type, *config):
""" Add config entry into Vagrantfile right before last 'end',
and prepends it with `config_prefix`.
Adding arbitrary config entry:
config = "string"
or, with conversion:
config = ['one', 'two', 'three']
=> one "two", three
"""
if len(config) == 1:
config = config[0]
elif len(config) == 0:
raise RuntimeError("config has no definition")
else:
config = f'{config[0]} ' + ', '.join(config[1:])
self.info('Adding into Vagrantfile', f"{type}.{config}")
vf_tmp = self.vf_read()
# Lookup last 'end' in Vagrantfile
i = 0
for line in reversed(vf_tmp):
i -= 1
if (line.find('end') != -1):
break
vf_tmp = vf_tmp[:i] \
+ [self.config_prefix + f"{type}." + config] \
+ vf_tmp[i:]
self.vf_write(vf_tmp)
def vf_read(self):
""" read Vagrantfile
also splits
"""
return open(self.vagrantfile).read().splitlines()
def vf_write(self, vf_tmp):
""" write into Vagrantfile
str or list
runs validate()
"""
if type(vf_tmp) is list:
vf_tmp = self.eol.join(vf_tmp)
with open(self.vagrantfile, 'w', newline=self.eol) as f:
f.write(vf_tmp)
self.validate()
def vf_backup(self, msg=''):
""" backup Vagrantfile contents to vf_data """
if msg:
self.info("Trying", msg)
self.msg = msg
self.vf_data = self.vf_read()
def vf_restore(self):
""" restore Vagrantfile contents frmo vf_data"""
if self.msg:
self.info('Reverting', self.msg, 'red')
self.msg = ''
self.vf_write(self.vf_data)
## Helpers ##
def info(self, key = '', val = '', color = 'green'):
""" info out!
see msgout()
"""
self.msgout('debug', key, val, color)
def debug(self, key = '', val = '', color='yellow'):
""" debugging, yay!
see msgout()
"""
self.msgout('debug', key, val, color)
def msgout(self, mtype, key = '', val = '', color = 'red'):
""" args: key, value, indent, color
all optional
"""
if type(val) is list and len(val):
ind_val = ''
for v in val:
if v:
ind_val += ' '*self.default_indent + self.hr(v) + self.eol
val = ind_val
else:
val = self.hr(val)
emsg = lambda: RuntimeError(f"Message type unknown: {mtype}")
if val:
getattr(self.super,
mtype,
emsg,
)(key, val, color)
else:
getattr(self.super,
mtype,
emsg,
)(key)
def hr(self, val):
""" return human readable data """
if type(val) is tuple or type(val) is list:
ret = ''
for v in val:
ret += self.hr(v)
return ret
if type(val) is bytes:
val = str(val, "utf-8")
elif type(val) is not str:
val = str(val)
try:
            val = val.rstrip()
eol = self.eol
except:
eol = ''
return f'{val}{eol}'
def set_default(self, where, default):
if not (where in self.data and self.data[where]):
self.data[where] = default
def prepend(self, thing, string):
if type(thing) is list:
return thing.insert(0, string)
elif type(thing) is tuple:
return (string ,) + thing
else:
return string + ' ' + thing
def cmd_mkcp(self, target_dir, target):
target_dir = self.quote(target_dir)
target = self.quote(target)
return f'mkdir -p {target_dir}; cp -vafr {target} {target_dir}'
def is_uri(self, uri):
return getattr(urlparse(uri),
                'scheme',
None)
def quote(self, string):
return f'"{string}"'
def kv(self, key, val):
return f'{key}: "{val}"'
| # coding: utf-8
""" Provision Step Vagrnat Class """
import tmt
import subprocess
import os
import re
from tmt.steps.provision.base import ProvisionBase
from tmt.utils import ConvertError, SpecificationError, GeneralError
from click import echo
from shlex import quote
from urllib.parse import urlparse
# DATA[*]:
# HOW = libvirt|virtual|docker|container|vagrant|...
# provider, in Vagrant's terminology
#
# IMAGE = URI|NAME
# NAME is for Vagrant or other HOW, passed directly
# URI can be path to BOX, QCOW2 or Vagrantfile f.e.
#
# BOX = Set a BOX name directly (in case of URI for IMAGE)
#
class ProvisionVagrant(ProvisionBase):
""" Use Vagrant to Provision an environment for testing """
executable = 'vagrant'
config_prefix = ' config.'
sync_type = 'rsync'
default_image = 'fedora/31-cloud-base'
default_container = 'fedora:latest'
default_indent = 16
vf_name = 'Vagrantfile'
timeout = 333
eol = '\n'
## Default API ##
def __init__(self, data, step):
""" Initialize the Vagrant provision step """
self.super = super(ProvisionVagrant, self)
self.super.__init__(data, step)
self.vagrantfile = os.path.join(self.provision_dir, self.vf_name)
self.vf_data = ''
self.path = os.path.join(self.provision_dir, 'data.yaml')
# Check for working Vagrant
self.run_vagrant('version')
# Let's check what's needed
self.check_how()
def load(self):
""" Load ProvisionVagrant step """
raise SpecificationError("NYI: cannot load")
self.super.load()
def save(self):
""" Save ProvisionVagrant step """
raise SpecificationError("NYI: cannot save")
self.super.save()
def go(self):
""" Execute actual provisioning """
self.init()
self.info(f'Provisioning {self.executable}, {self.vf_name}', self.vf_read())
return self.run_vagrant('up')
def execute(self, *args, **kwargs):
""" Execute remote command """
return self.run_vagrant('ssh', '-c', self.join(args))
def show(self):
""" Create and show the Vagrantfile """
self.init()
self.super.show(keys=['how', 'box', 'image'])
self.info(self.vf_name, self.vf_read())
def sync_workdir_to_guest(self):
""" sync on demand """
return self.run_vagrant('rsync')
def sync_workdir_from_guest(self):
""" sync from guest to host """
command = 'rsync-back'
self.plugin_install(command)
return self.run_vagrant(command)
def copy_from_guest(self, target):
""" copy file/folder from guest to host's copy dir """
beg = f"[[ -d '{target}' ]]"
end = 'exit 0; set -xe; '
isdir = f"{beg} || {end}"
isntdir = f"{beg} && {end}"
target_dir = f'{self.provision_dir}/copy/{target}'
self.execute(isdir + self.cmd_mkcp(target_dir, f'{target}/.'))
target_dir = f'$(dirname "{self.provision_dir}/copy/{target}")'
self.execute(isntdir + self.cmd_mkcp(target_dir, target))
self.sync_workdir_from_guest()
def destroy(self):
""" remove instance """
return self.run_vagrant('destroy', '-f')
def prepare(self, how, what):
""" add single 'preparator' and run it """
name = 'prepare'
cmd = 'provision'
# TODO: FIX
whatpath = os.path.join(self.step.plan.workdir,
'discover',
'one',
'tests',
what)
self.debug('Trying path', whatpath)
if os.path.exists(whatpath) and os.path.isfile(whatpath):
what = whatpath
if how == 'ansible':
name = how
self.add_config_block(cmd,
name,
f'become = true',
f'become_user = "root"',
f'playbook = "{what}"')
#TODO: run: 'never'
else:
if self.is_uri(what):
method = 'path'
else:
method = 'inline'
self.add_config('vm',
cmd,
self.quote(name),
self.kv('type', how),
self.kv('privileged', 'true'),
self.kv('run', 'never'),
self.kv(method, what))
# maybe?
return self.run_vagrant(cmd, f'--{cmd}-with', name)
## Additional API ##
def init(self):
""" Initialize Vagrantfile """
self.info('Provision dir', self.provision_dir)
# Are we resuming?
if os.path.exists(self.vagrantfile) and os.path.isfile(self.vagrantfile):
self.validate()
return
# Create a Vagrantfile
self.create()
# Let's add what's needed
# Do this first to install provider
self.add_how()
# Add default entries to Vagrantfile
self.add_defaults()
def create(self):
""" Create default Vagrantfile with our modifications """
self.run_vagrant('init', '-fm', self.data['box'])
self.info('Initialized new Vagrantfile', self.vf_read())
def clean(self):
""" remove box and base box """
return self.run_vagrant('box', 'remove', '-f', self.data['box'])
# TODO: libvirt storage removal?
def validate(self):
""" Validate Vagrantfile format """
return self.run_vagrant('validate')
def reload(self):
""" restart guest machine """
return self.run_vagrant('reload')
def plugin_install(self, name):
""" Install a vagrant plugin if it's not installed yet.
"""
plugin = f'{self.executable}-{name}'
command = ['plugin', 'install']
try:
# is it already present?
return self.run(f"bash -c \"{self.executable} {command[0]} list | grep '^{plugin}'\"")
except GeneralError:
pass
try:
# try to install it
return self.run_vagrant(command[0], command[1], plugin)
except GeneralError as error:
# Let's work-around the error handling limitation for now
# by getting the output manually
command = ' '.join([self.executable] + command + [plugin])
out, err = self.run(f"bash -c \"{command}; :\"")
if re.search(r"Conflicting dependency chains:", err) is None:
raise error
raise ConvertError('Dependency conflict detected.\n'
'Please install vagrant plugins from one source only(hint: `dnf remove vagrant-libvirt`).')
## Knowhow ##
def check_how(self):
""" Decide what to do when HOW is ...
does not add anything into Vagrantfile yet
"""
self.debug('VagrantProvider', 'Checking initial status, setting defaults.')
self.set_default('how', 'virtual')
self.set_default('image', self.default_image)
image = self.data['image']
if self.is_uri(image):
self.set_default('box', 'box_' + self.instance_name)
if re.search(r"\.box$", image) is None:
# an actual box file, Great!
pass
elif re.search(r"\.qcow2$", image) is None:
# do some qcow2 magic
self.data['box'] = '...'
raise SpecificationError("NYI: QCOW2 image")
else:
raise SpecificationError(f"Image format not recognized: {image}")
else:
self.set_default('box', image)
self.data['image'] = None
for x in ('how', 'box', 'image'):
self.info(f' {x}', self.data[x])
def add_how(self):
""" Add provider (in Vagrant-speak) specifics """
getattr(self,
f"how_{self.data['how']}",
lambda: 'generic',
)()
def how_virtual(self):
self.debug("generating", "virtual")
image = self.data['image']
if image:
self.add_config_value('vm', "box_url", image)
# let's try libvirt as default for now
self.how_libvirt()
    def how_generic(self):
self.debug("generating", "generic")
self.add_provider(self.data['how'])
def how_libvirt(self):
name = 'libvirt'
self.debug("generating", name)
self.plugin_install(name)
self.add_provider(name, 'memory = 1024')
self.vf_backup("QEMU session")
try:
self.add_provider(name, 'qemu_use_session = true')
except GeneralError as error:
# Not really an error
#self.debug(error)
self.vf_restore()
def how_openstack(self):
self.debug("generating", "openstack")
raise SpecificationError('NYI: cannot currently run on openstack.')
def how_docker(self):
self.how_container()
def how_podman(self):
self.how_container()
def how_container(self):
self.debug("generating", "container")
raise SpecificationError('NYI: cannot currently run containers.')
## END of API ##
def vagrant_status(self):
""" Get vagrant's status """
raise ConvertError('NYI: cannot currently return status.')
# TODO: how to get stdout from self.run?
#csp = self.run_vagrant('status')
#return self.hr(csp.stdout)
def add_defaults(self):
""" Adds default config entries
1) Disable default sync
2) To sync plan workdir
3) setup ssh
4) memory: 1024
"""
self.add_synced_folder(".", "/vagrant", 'disabled: true')
dir = self.step.plan.workdir
self.add_synced_folder(dir, dir)
self.add_config_value('ssh', 'username', "root")
# probably not needed
#self.add_config_value('ssh', 'insert_key', 'true')
self.add_config_value('nfs', 'verify_installed', 'false')
def run_vagrant(self, *args):
""" Run vagrant command and raise an error if it fails
args = 'command args'
or
            args = ['command', 'args']
"""
if len(args) == 0:
raise RuntimeError("vagrant has to run with args")
elif len(args) == 1:
args = args[0]
cmd = self.prepend(args, self.executable)
# timeout = self.timeout,
return self.run(
cmd,
cwd = self.provision_dir)
def add_synced_folder(self, sync_from, sync_to, *args):
self.add_config('vm',
'synced_folder',
self.quote(sync_from),
self.quote(sync_to),
self.kv('type', self.sync_type),
*args)
def add_provider(self, provider, *config):
self.add_config_block('provider', provider, *config)
def add_config_block(self, name, block, *config):
""" Add config block into Vagrantfile
"""
config_str = ''
for c in config:
config_str += f'{block}.{c}; '
self.add_config('vm', f"{name} '{block}' do |{block}| {config_str}end")
def add_config_value(self, type, key, value):
""" Add config = value into Vagrantfile
"""
self.add_config(type, f"{key} = '{value}'")
def add_config(self, type, *config):
""" Add config entry into Vagrantfile right before last 'end',
and prepends it with `config_prefix`.
Adding arbitrary config entry:
config = "string"
or, with conversion:
config = ['one', 'two', 'three']
=> one "two", three
"""
if len(config) == 1:
config = config[0]
elif len(config) == 0:
raise RuntimeError("config has no definition")
else:
config = f'{config[0]} ' + ', '.join(config[1:])
self.info('Adding into Vagrantfile', f"{type}.{config}")
vf_tmp = self.vf_read()
# Lookup last 'end' in Vagrantfile
i = 0
for line in reversed(vf_tmp):
i -= 1
if (line.find('end') != -1):
break
vf_tmp = vf_tmp[:i] \
+ [self.config_prefix + f"{type}." + config] \
+ vf_tmp[i:]
self.vf_write(vf_tmp)
def vf_read(self):
""" read Vagrantfile
also splits
"""
return open(self.vagrantfile).read().splitlines()
def vf_write(self, vf_tmp):
""" write into Vagrantfile
str or list
runs validate()
"""
if type(vf_tmp) is list:
vf_tmp = self.eol.join(vf_tmp)
with open(self.vagrantfile, 'w', newline=self.eol) as f:
f.write(vf_tmp)
self.validate()
def vf_backup(self, msg=''):
""" backup Vagrantfile contents to vf_data """
if msg:
self.info("Trying", msg)
self.msg = msg
self.vf_data = self.vf_read()
def vf_restore(self):
""" restore Vagrantfile contents frmo vf_data"""
if self.msg:
self.info('Reverting', self.msg, 'red')
self.msg = ''
self.vf_write(self.vf_data)
## Helpers ##
def info(self, key = '', val = '', color = 'green'):
""" info out!
see msgout()
"""
self.msgout('debug', key, val, color)
def debug(self, key = '', val = '', color='yellow'):
""" debugging, yay!
see msgout()
"""
self.msgout('debug', key, val, color)
def msgout(self, mtype, key = '', val = '', color = 'red'):
""" args: key, value, indent, color
all optional
"""
if type(val) is list and len(val):
ind_val = ''
for v in val:
if v:
ind_val += ' '*self.default_indent + self.hr(v) + self.eol
val = ind_val
else:
val = self.hr(val)
emsg = lambda: RuntimeError(f"Message type unknown: {mtype}")
if val:
getattr(self.super,
mtype,
emsg,
)(key, val, color)
else:
getattr(self.super,
mtype,
emsg,
)(key)
def hr(self, val):
""" return human readable data """
if type(val) is tuple or type(val) is list:
ret = ''
for v in val:
ret += self.hr(v)
return ret
if type(val) is bytes:
val = str(val, "utf-8")
elif type(val) is not str:
val = str(val)
try:
            val = val.rstrip()
eol = self.eol
except:
eol = ''
return f'{val}{eol}'
def set_default(self, where, default):
if not (where in self.data and self.data[where]):
self.data[where] = default
def prepend(self, thing, string):
if type(thing) is list:
return thing.insert(0, string)
elif type(thing) is tuple:
return (string ,) + thing
else:
return string + ' ' + thing
def cmd_mkcp(self, target_dir, target):
target_dir = self.quote(target_dir)
target = self.quote(target)
return f'mkdir -p {target_dir}; cp -vafr {target} {target_dir}'
def is_uri(self, uri):
return getattr(urlparse(uri),
                'scheme',
None)
def quote(self, string):
return f'"{string}"'
def kv(self, key, val):
return f'{key}: "{val}"'
| en | 0.614309 | # coding: utf-8 Provision Step Vagrnat Class # DATA[*]: # HOW = libvirt|virtual|docker|container|vagrant|... # provider, in Vagrant's terminilogy # # IMAGE = URI|NAME # NAME is for Vagrant or other HOW, passed directly # URI can be path to BOX, QCOW2 or Vagrantfile f.e. # # BOX = Set a BOX name directly (in case of URI for IMAGE) # Use Vagrant to Provision an environment for testing ## Default API ## Initialize the Vagrant provision step # Check for working Vagrant # Let's check what's needed Load ProvisionVagrant step Save ProvisionVagrant step Execute actual provisioning Execute remote command Create and show the Vagrantfile sync on demand sync from guest to host copy file/folder from guest to host's copy dir remove instance add single 'preparator' and run it # TODO: FIX #TODO: run: 'never' # maybe? ## Additional API ## Initialize Vagrantfile # Are we resuming? # Create a Vagrantfile # Let's add what's needed # Do this first to install provider # Add default entries to Vagrantfile Create default Vagrantfile with our modifications remove box and base box # TODO: libvirt storage removal? Validate Vagrantfile format restart guest machine Install a vagrant plugin if it's not installed yet. # is it already present? # try to install it # Let's work-around the error handling limitation for now # by getting the output manually ## Knowhow ## Decide what to do when HOW is ... does not add anything into Vagrantfile yet # an actual box file, Great! # do some qcow2 magic Add provider (in Vagrant-speak) specifics # let's try libvirt as default for now # Not really an error #self.debug(error) ## END of API ## Get vagrant's status # TODO: how to get stdout from self.run? #csp = self.run_vagrant('status') #return self.hr(csp.stdout) Adds default config entries 1) Disable default sync 2) To sync plan workdir 3) setup ssh 4) memory: 1024 # probably not needed #self.add_config_value('ssh', 'insert_key', 'true') Run vagrant command and raise an error if it fails args = 'command args' or args = ['comand', 'args'] # timeout = self.timeout, Add config block into Vagrantfile Add config = value into Vagrantfile Add config entry into Vagrantfile right before last 'end', and prepends it with `config_prefix`. Adding arbitrary config entry: config = "string" or, with conversion: config = ['one', 'two', 'three'] => one "two", three # Lookup last 'end' in Vagrantfile read Vagrantfile also splits write into Vagrantfile str or list runs validate() backup Vagrantfile contents to vf_data restore Vagrantfile contents frmo vf_data ## Helpers ## info out! see msgout() debugging, yay! see msgout() args: key, value, indent, color all optional return human readable data | 2.116718 | 2 |
xlib/avecl/_internal/op/transpose.py | jkennedyvz/DeepFaceLive | 3 | 6633132 | import numpy as np
from ..AAxes import AAxes
from ..AShape import AShape
from ..backend import Kernel
from ..HKernel import HKernel
from ..info import TransposeInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def transpose(input_t : Tensor, axes_order, op_text=None, dtype : np.dtype = None, output_t : Tensor=None, is_add_to_output=False) -> Tensor:
"""
arguments:
axes_order Int
Iterable of ints
None
dtype cast to dtype
op_text(None) optional op with value during transpose.
'O = I'
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size
"""
op = SCacheton.get(_TransposeOp, input_t.shape, input_t.dtype, dtype, AAxes(axes_order), op_text, False if output_t is None else is_add_to_output )
if output_t is None:
output_t = Tensor (op.o_shape, op.o_dtype, device=input_t.get_device())
elif output_t.shape.size != op.o_shape.size:
raise ValueError(f'output_t must have size {op.o_shape.size}')
input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer() )
return output_t
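# Usage sketch (hypothetical shapes and dtype, not part of the original module):
#   x = Tensor((2, 3, 4), np.float32)
#   y = transpose(x, (2, 0, 1))   # y.shape == (4, 2, 3)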
class _TransposeOp:
def __init__(self, i_shape : AShape, i_dtype : np.dtype, o_dtype : np.dtype, axes_order : AAxes, op_text, is_add_to_output : bool ):
self.axes_order = axes_order
self.o_shape = o_shape = TransposeInfo(i_shape, axes_order).o_shape
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
if op_text is None:
op_text = 'O = I'
self.forward_krn = Kernel(global_shape=(i_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'i', i_shape.ndim)}
I_TYPE I = I_GLOBAL_LOAD(gid);
O_TYPE O;
{op_text};
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}( O_IDX({HKernel.axes_order_enum('I', axes_order )}), O );
}}""")
| import numpy as np
from ..AAxes import AAxes
from ..AShape import AShape
from ..backend import Kernel
from ..HKernel import HKernel
from ..info import TransposeInfo
from ..SCacheton import SCacheton
from ..Tensor import Tensor
def transpose(input_t : Tensor, axes_order, op_text=None, dtype : np.dtype = None, output_t : Tensor=None, is_add_to_output=False) -> Tensor:
"""
arguments:
axes_order Int
Iterable of ints
None
dtype cast to dtype
op_text(None) optional op with value during transpose.
'O = I'
output_t compute result to this Tensor.
Tensor may be with different shape, but should match total size
"""
op = SCacheton.get(_TransposeOp, input_t.shape, input_t.dtype, dtype, AAxes(axes_order), op_text, False if output_t is None else is_add_to_output )
if output_t is None:
output_t = Tensor (op.o_shape, op.o_dtype, device=input_t.get_device())
elif output_t.shape.size != op.o_shape.size:
raise ValueError(f'output_t must have size {op.o_shape.size}')
input_t.get_device().run_kernel(op.forward_krn, output_t.get_buffer(), input_t.get_buffer() )
return output_t
class _TransposeOp:
def __init__(self, i_shape : AShape, i_dtype : np.dtype, o_dtype : np.dtype, axes_order : AAxes, op_text, is_add_to_output : bool ):
self.axes_order = axes_order
self.o_shape = o_shape = TransposeInfo(i_shape, axes_order).o_shape
self.o_dtype = o_dtype = o_dtype if o_dtype is not None else i_dtype
if op_text is None:
op_text = 'O = I'
self.forward_krn = Kernel(global_shape=(i_shape.size,), kernel_text=f"""
{HKernel.define_tensor('O', o_shape, o_dtype)}
{HKernel.define_tensor('I', i_shape, i_dtype)}
__kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME)
{{
size_t gid = get_global_id(0);
{HKernel.decompose_idx_to_axes_idxs('gid', 'i', i_shape.ndim)}
I_TYPE I = I_GLOBAL_LOAD(gid);
O_TYPE O;
{op_text};
{'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}( O_IDX({HKernel.axes_order_enum('I', axes_order )}), O );
}}""")
| en | 0.306141 | arguments: axes_order Int Iterable of ints None dtype cast to dtype op_text(None) optional op with value during transpose. 'O = I' output_t compute result to this Tensor. Tensor may be with different shape, but should match total size {HKernel.define_tensor('O', o_shape, o_dtype)} {HKernel.define_tensor('I', i_shape, i_dtype)} __kernel void impl(__global O_PTR_TYPE* O_PTR_NAME, __global const I_PTR_TYPE* I_PTR_NAME) {{ size_t gid = get_global_id(0); {HKernel.decompose_idx_to_axes_idxs('gid', 'i', i_shape.ndim)} I_TYPE I = I_GLOBAL_LOAD(gid); O_TYPE O; {op_text}; {'O_STORE_ADD' if is_add_to_output else 'O_GLOBAL_STORE'}( O_IDX({HKernel.axes_order_enum('I', axes_order )}), O ); }} | 2.192755 | 2 |
lib/test_pca9685.py | Mario-Kart-Felix/oxygen | 1 | 6633133 | #!/usr/bin/env python
import time
from pca9685 import PCA9685
def main():
driver = PCA9685()
driver.set_pwm_freq(60)
time.sleep(3)
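    # Sweep the 12-bit PWM "off" tick on channel 0 up from 300 toward 500 and back down,
    # e.g. to slowly move a servo across part of its range at the 60 Hz rate set above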
for i in range(300, 500, 5):
print(i)
driver.set_pwm(0, 0, i)
time.sleep(0.1)
for i in range(500, 299, -5):
print(i)
driver.set_pwm(0, 0, i)
time.sleep(0.1)
# driver.set_pwm(0, 0, 0)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import time
from pca9685 import PCA9685
def main():
driver = PCA9685()
driver.set_pwm_freq(60)
time.sleep(3)
for i in range(300, 500, 5):
print(i)
driver.set_pwm(0, 0, i)
time.sleep(0.1)
for i in range(500, 299, -5):
print(i)
driver.set_pwm(0, 0, i)
time.sleep(0.1)
# driver.set_pwm(0, 0, 0)
if __name__ == "__main__":
main()
| en | 0.19696 | #!/usr/bin/env python # driver.set_pwm(0, 0, 0) | 3.055096 | 3 |
ML_Chinahadoop/05/code/lesson/5.3.stat02.py | lsieun/learn-AI | 1 | 6633134 | # coding:utf-8
#
import numpy as np
from scipy import stats
def calc_statistics(x):
    n = x.shape[0]  # number of samples
    # manual computation
m = 0
m2 = 0
m3 = 0
m4 = 0
for t in x:
m += t
m2 += t*t
m3 += t**3
m4 += t**4
m /= n
m2 /= n
m3 /= n
m4 /= n
mu = m
sigma = np.sqrt(m2 - mu*mu)
skew = (m3 - 3*mu*m2 + 2*mu**3) / sigma**3
kurtosis = (m4 - 4*mu*m3 + 6*mu*mu*m2 - 3*mu**4) / sigma**4 - 3
print('Manually computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)
def calc_statistics2(x):
# verify with library functions
mu = np.mean(x, axis=0)
sigma = np.std(x, axis=0)
skew = stats.skew(x)
kurtosis = stats.kurtosis(x)
print('Library-computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)
if __name__ == '__main__':
d = np.random.randn(5)
print(d)
print(d.shape)
calc_statistics(d)
calc_statistics2(d) | # coding:utf-8
#
import numpy as np
from scipy import stats
def calc_statistics(x):
n = x.shape[0]  # number of samples
# manual calculation from raw moments
m = 0
m2 = 0
m3 = 0
m4 = 0
for t in x:
m += t
m2 += t*t
m3 += t**3
m4 += t**4
m /= n
m2 /= n
m3 /= n
m4 /= n
mu = m
sigma = np.sqrt(m2 - mu*mu)
skew = (m3 - 3*mu*m2 + 2*mu**3) / sigma**3
kurtosis = (m4 - 4*mu*m3 + 6*mu*mu*m2 - 3*mu**4) / sigma**4 - 3
print('Manually computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)
def calc_statistics2(x):
# verify with library functions
mu = np.mean(x, axis=0)
sigma = np.std(x, axis=0)
skew = stats.skew(x)
kurtosis = stats.kurtosis(x)
print('Library-computed mean, std, skewness, kurtosis:', mu, sigma, skew, kurtosis)
if __name__ == '__main__':
d = np.random.randn(5)
print(d)
print(d.shape)
calc_statistics(d)
calc_statistics2(d) | zh | 0.956131 | # coding:utf-8 # # 样本个数 # 手动计算 # 使用系统函数验证 | 3.48782 | 3 |
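# For reference: an equivalent check built directly on central moments. The manual code
# above uses raw moments with the biased (population) estimators, so it matches
# np.std(x, ddof=0) and scipy.stats.skew/kurtosis at their default bias=True, and the
# trailing "- 3" corresponds to scipy's default Fisher (excess) kurtosis.
import numpy as np

def central_moment_stats(x):
    mu = x.mean()
    m2 = ((x - mu) ** 2).mean()
    m3 = ((x - mu) ** 3).mean()
    m4 = ((x - mu) ** 4).mean()
    return mu, np.sqrt(m2), m3 / m2 ** 1.5, m4 / m2 ** 2 - 3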
sabnzbd/lang.py | jxyzn/sabnzbd | 0 | 6633135 | #!/usr/bin/python3 -OO
# -*- coding: utf-8 -*-
# Copyright 2011-2021 The SABnzbd-Team <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.lang - Language support
"""
# This module should be the first non-standard import to
# be done at the top of the application's main file.
# This will ensure that the default language is available
# and the special functions are active.
# This module cannot import any application modules!!
#
# Required keywords for pygettext.py: -k T -k TT
#
# The following pseudo-builtins are provided.
# T() Unicode translation
# TT() Dummy translation, use to mark table entries for POT scanning
import gettext
import builtins
import glob
import os
import locale
__all__ = ["set_locale_info", "set_language", "list_languages"]
_DOMAIN = "" # Holds translation domain
_LOCALEDIR = "" # Holds path to the translation base folder
def set_locale_info(domain, localedir):
""" Setup the domain and localedir for translations """
global _DOMAIN, _LOCALEDIR
_DOMAIN = domain
_LOCALEDIR = localedir
def set_language(language=None):
""" Activate language, empty language will set default texts. """
if not language:
language = ""
lng = gettext.translation(_DOMAIN, _LOCALEDIR, [language], fallback=True)
builtins.__dict__["T"] = lng.gettext
builtins.__dict__["TT"] = lambda x: str(x) # Use in text tables
def list_languages():
"""Return sorted list of (lang-code, lang-string) pairs,
representing the available languages.
When any language file is found, the default tuple ('en', 'English')
will be included. Otherwise an empty list is returned.
"""
# Find all the MO files.
lst = []
for path in glob.glob(os.path.join(_LOCALEDIR, "*")):
if os.path.isdir(path) and not path.endswith("en"):
lngname = os.path.basename(path)
lng = locale.normalize(lngname)
# Example: 'pt_BR.ISO8859-1'
lng_short = lng[: lng.find("_")]
lng_full = lng[: lng.find(".")]
# First try full language string, e.g. 'pt_BR'
language = LanguageTable.get(lng_full, (lng_full, lng_full))
if language[0] == lng_full:
# Full language string not defined: try short form, e.g. 'pt'
language = LanguageTable.get(lng_short, (lng_short, lng_short))
lng = lng_short
else:
lng = lng_full
language = language[1]
lst.append((lng, language))
lst.append(("en", "English"))
lst.sort()
return lst
def is_rtl(lang):
return LanguageTable.get(lang, LanguageTable["en"])[3]  # unknown codes fall back to the English (LTR) entry
# English name, native name, code page, right-to-left
LanguageTable = {
"aa": ("Afar", "Afaraf", 0, False),
"af": ("Afrikaans", "Afrikaans", 0, False),
"ak": ("Akan", "Akan", 0, False),
"sq": ("Albanian", "Shqip", 0, False),
"an": ("Aragonese", "Aragonés", 0, False),
"ae": ("Avestan", "Avesta", 0, False),
"ay": ("Aymara", "Aymararu", 0, False),
"bm": ("Bambara", "Bamanankan", 0, False),
"eu": ("Basque", "Euskara", 0, False),
"bi": ("Bislama", "Bislama", 0, False),
"bs": ("Bosnian", "Bosanskijezik", 0, False),
"br": ("Breton", "Brezhoneg", 0, False),
"ca": ("Catalan", "Català", 0, False),
"ch": ("Chamorro", "Chamoru", 0, False),
"kw": ("Cornish", "Kernewek", 0, False),
"co": ("Corsican", "Corsu", 0, False),
"hr": ("Croatian", "Hrvatski", 0, False),
"cs": ("Czech", "Cesky, ceština", 0, False),
"da": ("Danish", "Dansk", 0, False),
"nl": ("Dutch", "Nederlands", 0, False),
"en": ("English", "English", 0, False),
"eo": ("Esperanto", "Esperanto", 0, False),
"et": ("Estonian", "Eesti", 0, False),
"fo": ("Faroese", "Føroyskt", 0, False),
"fj": ("Fijian", "Vosa Vakaviti", 0, False),
"fi": ("Finnish", "Suomi", 0, False),
"fr": ("French", "Français", 0, False),
"gl": ("Galician", "Galego", 0, False),
"de": ("German", "Deutsch", 0, False),
"he": ("Hebrew", "עִבְרִית", 1255, True),
"hz": ("Herero", "Otjiherero", 0, False),
"ho": ("<NAME>", "<NAME>", 0, False),
"hu": ("Hungarian", "Magyar", 0, False),
"id": ("Indonesian", "Bahasa Indonesia", 0, False),
"ga": ("Irish", "Gaeilge", 0, False),
"io": ("Ido", "Ido", 0, False),
"is": ("Icelandic", "Íslenska", 0, False),
"it": ("Italian", "Italiano", 0, False),
"jv": ("Javanese", "BasaJawa", 0, False),
"rw": ("Kinyarwanda", "Ikinyarwanda", 0, False),
"kg": ("Kongo", "KiKongo", 0, False),
"kj": ("Kwanyama", "Kuanyama", 0, False),
"la": ("Latin", "Lingua latina", 0, False),
"lb": ("Luxembourgish", "Lëtzebuergesch", 0, False),
"lg": ("Luganda", "Luganda", 0, False),
"li": ("Limburgish", "Limburgs", 0, False),
"ln": ("Lingala", "Lingála", 0, False),
"lt": ("Lithuanian", "Lietuviukalba", 0, False),
"lv": ("Latvian", "Latviešuvaloda", 0, False),
"gv": ("Manx", "Gaelg", 0, False),
"mg": ("Malagasy", "Malagasy fiteny", 0, False),
"mt": ("Maltese", "Malti", 0, False),
"nb": ("Norwegian Bokmål", "Norsk bokmål", 0, False),
"nn": ("Norwegian Nynorsk", "Norsk nynorsk", 0, False),
"no": ("Norwegian", "Norsk", 0, False),
"oc": ("Occitan", "Occitan", 0, False),
"om": ("Oromo", "<NAME>", 0, False),
"pl": ("Polish", "Polski", 0, False),
"pt": ("Portuguese", "Português", 0, False),
"pt_BR": ("Portuguese Brazillian", "Português Brasileiro", 0, False),
"rm": ("Romansh", "Rumantsch grischun", 0, False),
"rn": ("Kirundi", "kiRundi", 0, False),
"ro": ("Romanian", "Româna", 1250, False),
"sc": ("Sardinian", "Sardu", 0, False),
"se": ("Northern Sami", "Davvisámegiella", 0, False),
"sm": ("Samoan", "Gagana fa'a Samoa", 0, False),
"gd": ("Gaelic", "Gàidhlig", 0, False),
"ru": ("Russian", "русский язык", 1251, False),
"sr": ("Serbian", "српски", 1251, False),
"sn": ("Shona", "Chi Shona", 0, False),
"sk": ("Slovak", "Slovencina", 0, False),
"sl": ("Slovene", "Slovenšcina", 0, False),
"st": ("Southern Sotho", "Sesotho", 0, False),
"es": ("Spanish Castilian", "Español, castellano", 0, False),
"su": ("Sundanese", "Basa Sunda", 0, False),
"sw": ("Swahili", "Kiswahili", 0, False),
"ss": ("Swati", "SiSwati", 0, False),
"sv": ("Swedish", "Svenska", 0, False),
"tn": ("Tswana", "Setswana", 0, False),
"to": ("Tonga (Tonga Islands)", "faka Tonga", 0, False),
"tr": ("Turkish", "Türkçe", 0, False),
"ts": ("Tsonga", "Xitsonga", 0, False),
"tw": ("Twi", "Twi", 0, False),
"ty": ("Tahitian", "Reo Tahiti", 0, False),
"wa": ("Walloon", "Walon", 0, False),
"cy": ("Welsh", "Cymraeg", 0, False),
"wo": ("Wolof", "Wollof", 0, False),
"fy": ("Western Frisian", "Frysk", 0, False),
"xh": ("Xhosa", "isi Xhosa", 0, False),
"yo": ("Yoruba", "Yorùbá", 0, False),
"zu": ("Zulu", "isi Zulu", 0, False),
"zh_CN": ("SimpChinese", "简体中文", 936, False),
}
# Setup a safe null-translation
set_language()
| #!/usr/bin/python3 -OO
# -*- coding: utf-8 -*-
# Copyright 2011-2021 The SABnzbd-Team <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.lang - Language support
"""
# This module should be the first non-standard import to
# be done at the top of the application's main file.
# This will ensure that the default language is available
# and the special functions are active.
# This module cannot import any application modules!!
#
# Required keywords for pygettext.py: -k T -k TT
#
# The following pseudo-builtins are provided.
# T() Unicode translation
# TT() Dummy translation, use to mark table entries for POT scanning
import gettext
import builtins
import glob
import os
import locale
__all__ = ["set_locale_info", "set_language", "list_languages"]
_DOMAIN = "" # Holds translation domain
_LOCALEDIR = "" # Holds path to the translation base folder
def set_locale_info(domain, localedir):
""" Setup the domain and localedir for translations """
global _DOMAIN, _LOCALEDIR
_DOMAIN = domain
_LOCALEDIR = localedir
def set_language(language=None):
""" Activate language, empty language will set default texts. """
if not language:
language = ""
lng = gettext.translation(_DOMAIN, _LOCALEDIR, [language], fallback=True)
builtins.__dict__["T"] = lng.gettext
builtins.__dict__["TT"] = lambda x: str(x) # Use in text tables
def list_languages():
"""Return sorted list of (lang-code, lang-string) pairs,
representing the available languages.
When any language file is found, the default tuple ('en', 'English')
will be included. Otherwise an empty list is returned.
"""
# Find all the MO files.
lst = []
for path in glob.glob(os.path.join(_LOCALEDIR, "*")):
if os.path.isdir(path) and not path.endswith("en"):
lngname = os.path.basename(path)
lng = locale.normalize(lngname)
# Example: 'pt_BR.ISO8859-1'
lng_short = lng[: lng.find("_")]
lng_full = lng[: lng.find(".")]
# First try full language string, e.g. 'pt_BR'
language = LanguageTable.get(lng_full, (lng_full, lng_full))
if language[0] == lng_full:
# Full language string not defined: try short form, e.g. 'pt'
language = LanguageTable.get(lng_short, (lng_short, lng_short))
lng = lng_short
else:
lng = lng_full
language = language[1]
lst.append((lng, language))
lst.append(("en", "English"))
lst.sort()
return lst
def is_rtl(lang):
return LanguageTable.get(lang, LanguageTable["en"])[3]  # unknown codes fall back to the English (LTR) entry
# English name, native name, code page, right-to-left
LanguageTable = {
"aa": ("Afar", "Afaraf", 0, False),
"af": ("Afrikaans", "Afrikaans", 0, False),
"ak": ("Akan", "Akan", 0, False),
"sq": ("Albanian", "Shqip", 0, False),
"an": ("Aragonese", "Aragonés", 0, False),
"ae": ("Avestan", "Avesta", 0, False),
"ay": ("Aymara", "Aymararu", 0, False),
"bm": ("Bambara", "Bamanankan", 0, False),
"eu": ("Basque", "Euskara", 0, False),
"bi": ("Bislama", "Bislama", 0, False),
"bs": ("Bosnian", "Bosanskijezik", 0, False),
"br": ("Breton", "Brezhoneg", 0, False),
"ca": ("Catalan", "Català", 0, False),
"ch": ("Chamorro", "Chamoru", 0, False),
"kw": ("Cornish", "Kernewek", 0, False),
"co": ("Corsican", "Corsu", 0, False),
"hr": ("Croatian", "Hrvatski", 0, False),
"cs": ("Czech", "Cesky, ceština", 0, False),
"da": ("Danish", "Dansk", 0, False),
"nl": ("Dutch", "Nederlands", 0, False),
"en": ("English", "English", 0, False),
"eo": ("Esperanto", "Esperanto", 0, False),
"et": ("Estonian", "Eesti", 0, False),
"fo": ("Faroese", "Føroyskt", 0, False),
"fj": ("Fijian", "Vosa Vakaviti", 0, False),
"fi": ("Finnish", "Suomi", 0, False),
"fr": ("French", "Français", 0, False),
"gl": ("Galician", "Galego", 0, False),
"de": ("German", "Deutsch", 0, False),
"he": ("Hebrew", "עִבְרִית", 1255, True),
"hz": ("Herero", "Otjiherero", 0, False),
"ho": ("<NAME>", "<NAME>", 0, False),
"hu": ("Hungarian", "Magyar", 0, False),
"id": ("Indonesian", "Bahasa Indonesia", 0, False),
"ga": ("Irish", "Gaeilge", 0, False),
"io": ("Ido", "Ido", 0, False),
"is": ("Icelandic", "Íslenska", 0, False),
"it": ("Italian", "Italiano", 0, False),
"jv": ("Javanese", "BasaJawa", 0, False),
"rw": ("Kinyarwanda", "Ikinyarwanda", 0, False),
"kg": ("Kongo", "KiKongo", 0, False),
"kj": ("Kwanyama", "Kuanyama", 0, False),
"la": ("Latin", "Lingua latina", 0, False),
"lb": ("Luxembourgish", "Lëtzebuergesch", 0, False),
"lg": ("Luganda", "Luganda", 0, False),
"li": ("Limburgish", "Limburgs", 0, False),
"ln": ("Lingala", "Lingála", 0, False),
"lt": ("Lithuanian", "Lietuviukalba", 0, False),
"lv": ("Latvian", "Latviešuvaloda", 0, False),
"gv": ("Manx", "Gaelg", 0, False),
"mg": ("Malagasy", "Malagasy fiteny", 0, False),
"mt": ("Maltese", "Malti", 0, False),
"nb": ("Norwegian Bokmål", "Norsk bokmål", 0, False),
"nn": ("Norwegian Nynorsk", "Norsk nynorsk", 0, False),
"no": ("Norwegian", "Norsk", 0, False),
"oc": ("Occitan", "Occitan", 0, False),
"om": ("Oromo", "<NAME>", 0, False),
"pl": ("Polish", "Polski", 0, False),
"pt": ("Portuguese", "Português", 0, False),
"pt_BR": ("Portuguese Brazillian", "Português Brasileiro", 0, False),
"rm": ("Romansh", "Rumantsch grischun", 0, False),
"rn": ("Kirundi", "kiRundi", 0, False),
"ro": ("Romanian", "Româna", 1250, False),
"sc": ("Sardinian", "Sardu", 0, False),
"se": ("Northern Sami", "Davvisámegiella", 0, False),
"sm": ("Samoan", "Gagana fa'a Samoa", 0, False),
"gd": ("Gaelic", "Gàidhlig", 0, False),
"ru": ("Russian", "русский язык", 1251, False),
"sr": ("Serbian", "српски", 1251, False),
"sn": ("Shona", "Chi Shona", 0, False),
"sk": ("Slovak", "Slovencina", 0, False),
"sl": ("Slovene", "Slovenšcina", 0, False),
"st": ("Southern Sotho", "Sesotho", 0, False),
"es": ("Spanish Castilian", "Español, castellano", 0, False),
"su": ("Sundanese", "Basa Sunda", 0, False),
"sw": ("Swahili", "Kiswahili", 0, False),
"ss": ("Swati", "SiSwati", 0, False),
"sv": ("Swedish", "Svenska", 0, False),
"tn": ("Tswana", "Setswana", 0, False),
"to": ("Tonga (Tonga Islands)", "faka Tonga", 0, False),
"tr": ("Turkish", "Türkçe", 0, False),
"ts": ("Tsonga", "Xitsonga", 0, False),
"tw": ("Twi", "Twi", 0, False),
"ty": ("Tahitian", "Reo Tahiti", 0, False),
"wa": ("Walloon", "Walon", 0, False),
"cy": ("Welsh", "Cymraeg", 0, False),
"wo": ("Wolof", "Wollof", 0, False),
"fy": ("Western Frisian", "Frysk", 0, False),
"xh": ("Xhosa", "isi Xhosa", 0, False),
"yo": ("Yoruba", "Yorùbá", 0, False),
"zu": ("Zulu", "isi Zulu", 0, False),
"zh_CN": ("SimpChinese", "简体中文", 936, False),
}
# Setup a safe null-translation
set_language()
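# For reference, a minimal usage sketch; the domain name and locale path below are
# placeholders for the gettext domain and the directory holding its compiled .mo catalogs.
def _demo():  # illustrative only
    set_locale_info("SABnzbd", "/path/to/locale")   # assumed domain and path
    set_language("de")                              # silently falls back when no catalog exists
    print(T("Paused"))                              # translated if a German catalog was found
    print(list_languages())                         # e.g. [('de', 'German'), ('en', 'English'), ...]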
| en | 0.76007 | #!/usr/bin/python3 -OO # -*- coding: utf-8 -*- # Copyright 2011-2021 The SABnzbd-Team <<EMAIL>> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. sabnzbd.lang - Language support # This module should be the first non-standard import to # be done at the top of the application's main file. # This will ensure that the default language is available # and the special functions are active. # This module cannot import any application modules!! # # Required keywords for pygettext.py: -k T -k TT # # The following pseudo-builtins are provided. # T() Unicode translation # TT() Dummy translation, use to mark table entries for POT scanning # Holds translation domain # Holds path to the translation base folder Setup the domain and localedir for translations Activate language, empty language will set default texts. # Use in text tables Return sorted list of (lang-code, lang-string) pairs, representing the available languages. When any language file is found, the default tuple ('en', 'English') will be included. Otherwise an empty list is returned. # Find all the MO files. # Example: 'pt_BR.ISO8859-1' # First try full language string, e.g. 'pt_BR' # Full language string not defined: try short form, e.g. 'pt' # English name, native name, code page, right-to-left # Setup a safe null-translation | 1.846977 | 2 |
app/urls.py | Hikasgai/webServicesTest | 0 | 6633136 | from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^calendarioanual$', views.calendarioanual),
url(r'^horario$', views.horarioAsignaturas),
url(r'^getAsignaturas$', views.getAsignaturas),
url(r'^obtenergrupos$', views.obtenerGrupos),
url(r'^asignaturas$', views.obtenerTodasLasAsignaturas),
url(r'^grados$', views.getGrados)
]
| from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^calendarioanual$', views.calendarioanual),
url(r'^horario$', views.horarioAsignaturas),
url(r'^getAsignaturas$', views.getAsignaturas),
url(r'^obtenergrupos$', views.obtenerGrupos),
url(r'^asignaturas$', views.obtenerTodasLasAsignaturas),
url(r'^grados$', views.getGrados)
]
| none | 1 | 1.686801 | 2 |
|
2021/day_6/part_2.py | jcm300/advent_of_code | 1 | 6633137 | <reponame>jcm300/advent_of_code<gh_stars>1-10
import os
import re
import sys
import multiprocessing
from functools import reduce
# Read arguments
if len(sys.argv) != 2:
raise ValueError('Please provide a filename input')
filename = sys.argv[1]
# Read file
file_data = open(os.getcwd() + '/' + filename, 'r')
#
# Parse file
#
text = file_data.read().replace('\n', '')
lantern_fishes = list(map(int, text.split(',')))
#
# Get answer
#
days = 256
grouped_fishes = {}
for i in range(9):
count = lantern_fishes.count(i)
grouped_fishes[i] = count
for day in range(days):
zeros = grouped_fishes[0]
for i in grouped_fishes:
if i < 8:
grouped_fishes[i] = grouped_fishes[i+1]
grouped_fishes[6] += zeros
grouped_fishes[8] = zeros
answer = 0
for fishes in grouped_fishes:
answer += grouped_fishes[fishes]
print(answer)
| import os
import re
import sys
import multiprocessing
from functools import reduce
# Read arguments
if len(sys.argv) != 2:
raise ValueError('Please provide a filename input')
filename = sys.argv[1]
# Read file
file_data = open(os.getcwd() + '/' + filename, 'r')
#
# Parse file
#
text = file_data.read().replace('\n', '')
lantern_fishes = list(map(int, text.split(',')))
#
# Get answer
#
days = 256
grouped_fishes = {}
for i in range(9):
count = lantern_fishes.count(i)
grouped_fishes[i] = count
for day in range(days):
zeros = grouped_fishes[0]
for i in grouped_fishes:
if i < 8:
grouped_fishes[i] = grouped_fishes[i+1]
grouped_fishes[6] += zeros
grouped_fishes[8] = zeros
answer = 0
for fishes in grouped_fishes:
answer += grouped_fishes[fishes]
print(answer) | en | 0.607272 | # Read arguments # Read file # # Parse file # # # Get answer # | 3.29483 | 3 |
app.py | tbotnz/flask-AdminLTE | 6 | 6633138 | from flask import Flask, render_template, redirect, url_for, request, json
app = Flask(__name__)
@app.route("/starter")
def starter():
return render_template("starter.html")
@app.route("/")
@app.route("/index")
def index():
return render_template("index.html")
@app.route("/index2")
def index_two():
return render_template("index2.html")
@app.route("/index3")
def index_three():
return render_template("index3.html")
@app.route("/widgets")
def widgets():
return render_template("pages/widgets.html")
@app.route("/calendar")
def calendar():
return render_template("pages/calendar.html")
@app.route("/gallery")
def gallery():
return render_template("pages/gallery.html")
@app.route("/charts/<template>")
def charts(template):
template = template.replace(".html", "")
return render_template(f"pages/charts/{template}.html")
@app.route("/examples/<template>")
def examples(template):
template = template.replace(".html", "")
return render_template(f"pages/examples/{template}.html")
@app.route("/forms/<template>")
def forms(template):
template = template.replace(".html", "")
return render_template(f"pages/forms/{template}.html")
@app.route("/layout/<template>")
def layout(template):
template = template.replace(".html", "")
return render_template(f"pages/layout/{template}.html")
@app.route("/mailbox/<template>")
def mailbox(template):
template = template.replace(".html", "")
return render_template(f"pages/mailbox/{template}.html")
@app.route("/tables/<template>")
def tables(template):
template = template.replace(".html", "")
return render_template(f"pages/tables/{template}.html")
@app.route("/ui/<template>")
def ui(template):
template = template.replace(".html", "")
return render_template(f"pages/UI/{template}.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=10001, threaded=True)
| from flask import Flask, render_template, redirect, url_for, request, json
app = Flask(__name__)
@app.route("/starter")
def starter():
return render_template("starter.html")
@app.route("/")
@app.route("/index")
def index():
return render_template("index.html")
@app.route("/index2")
def index_two():
return render_template("index2.html")
@app.route("/index3")
def index_three():
return render_template("index3.html")
@app.route("/widgets")
def widgets():
return render_template("pages/widgets.html")
@app.route("/calendar")
def calendar():
return render_template("pages/calendar.html")
@app.route("/gallery")
def gallery():
return render_template("pages/gallery.html")
@app.route("/charts/<template>")
def charts(template):
template = template.replace(".html", "")
return render_template(f"pages/charts/{template}.html")
@app.route("/examples/<template>")
def examples(template):
template = template.replace(".html", "")
return render_template(f"pages/examples/{template}.html")
@app.route("/forms/<template>")
def forms(template):
template = template.replace(".html", "")
return render_template(f"pages/forms/{template}.html")
@app.route("/layout/<template>")
def layout(template):
template = template.replace(".html", "")
return render_template(f"pages/layout/{template}.html")
@app.route("/mailbox/<template>")
def mailbox(template):
template = template.replace(".html", "")
return render_template(f"pages/mailbox/{template}.html")
@app.route("/tables/<template>")
def tables(template):
template = template.replace(".html", "")
return render_template(f"pages/tables/{template}.html")
@app.route("/ui/<template>")
def ui(template):
template = template.replace(".html", "")
return render_template(f"pages/UI/{template}.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=10001, threaded=True)
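# For reference: each catch-all <template> route above surfaces unknown names as a 500
# (jinja2.TemplateNotFound). A small helper that converts that into a 404 instead; the
# helper name and route layout are illustrative, not part of the original app.
from flask import abort
from jinja2 import TemplateNotFound

def render_page(folder, template):
    template = template.replace(".html", "")
    try:
        return render_template(f"pages/{folder}/{template}.html")
    except TemplateNotFound:
        abort(404)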
| none | 1 | 2.7288 | 3 |
|
patch-host.py | dwbfox/patch-hosts | 0 | 6633139 | <gh_stars>0
import urllib.request as requests
import shutil
import os
hosts_dir = 'C:\\Windows\\System32\\drivers\\etc\\'
hosts_download = 'http://winhelp2002.mvps.org/hosts.txt'
print('Download updated hosts file...');
with open('hosts.data', 'wbc') as hfile:
hfile.write(requests.urlopen(hosts_download).read())
try:
print('Making a backup of existing hosts file...');
shutil.move(hosts_dir + 'hosts', hosts_dir + 'hosts.bak')
print('Copying over updated hosts file...');
shutil.move('hosts.data', hosts_dir + 'hosts')
except IOError as e:
print('An error occured while patching hosts file. Make sure this script is running as root!');
| import urllib.request as requests
import shutil
import os
hosts_dir = 'C:\\Windows\\System32\\drivers\\etc\\'
hosts_download = 'http://winhelp2002.mvps.org/hosts.txt'
print('Download updated hosts file...');
with open('hosts.data', 'wbc') as hfile:
hfile.write(requests.urlopen(hosts_download).read())
try:
print('Making a backup of existing hosts file...');
shutil.move(hosts_dir + 'hosts', hosts_dir + 'hosts.bak')
print('Copying over updated hosts file...');
shutil.move('hosts.data', hosts_dir + 'hosts')
except IOError as e:
print('An error occured while patching hosts file. Make sure this script is running as root!'); | none | 1 | 2.829788 | 3 |
|
apps/amcm/migrations/0061_auto_20220503_1309.py | agsneutron/asociacion_mexicana_cuarto_milla | 0 | 6633140 | <gh_stars>0
# Generated by Django 3.2.6 on 2022-05-03 18:09
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0060_auto_20220427_1856'),
]
operations = [
migrations.AddField(
model_name='estadocuentadetalle',
name='ejemplares',
field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Ejemplares'),
),
migrations.AlterField(
model_name='credito',
name='fecha_pago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205944, tzinfo=utc), null=True, verbose_name='Fecha de pago'),
),
migrations.AlterField(
model_name='credito',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205930, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='cuentaspago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 196461, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='elegible',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 196723, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='estadocuenta',
name='fecha_registro',
field=models.DateTimeField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 206228, tzinfo=utc), editable=False, verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='estadocuentadetalle',
name='fecha_registro',
field=models.DateTimeField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 206578, tzinfo=utc), editable=False, verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 5, 3, 18, 9, 50, 194908, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 194923, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205417, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='referenciaformapago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205052, tzinfo=utc), verbose_name='Fecha de registro'),
),
]
| # Generated by Django 3.2.6 on 2022-05-03 18:09
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('amcm', '0060_auto_20220427_1856'),
]
operations = [
migrations.AddField(
model_name='estadocuentadetalle',
name='ejemplares',
field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Ejemplares'),
),
migrations.AlterField(
model_name='credito',
name='fecha_pago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205944, tzinfo=utc), null=True, verbose_name='Fecha de pago'),
),
migrations.AlterField(
model_name='credito',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205930, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='cuentaspago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 196461, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='elegible',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 196723, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='estadocuenta',
name='fecha_registro',
field=models.DateTimeField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 206228, tzinfo=utc), editable=False, verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='estadocuentadetalle',
name='fecha_registro',
field=models.DateTimeField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 206578, tzinfo=utc), editable=False, verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='pago',
name='fechaPago',
field=models.DateField(blank=True, default=datetime.datetime(2022, 5, 3, 18, 9, 50, 194908, tzinfo=utc), null=True, verbose_name='Fecha del Pago'),
),
migrations.AlterField(
model_name='pago',
name='fechaRegistro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 194923, tzinfo=utc), verbose_name='Fecha de Registro'),
),
migrations.AlterField(
model_name='recibo',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205417, tzinfo=utc), verbose_name='Fecha de registro'),
),
migrations.AlterField(
model_name='referenciaformapago',
name='fecha_registro',
field=models.DateField(default=datetime.datetime(2022, 5, 3, 18, 9, 50, 205052, tzinfo=utc), verbose_name='Fecha de registro'),
),
] | en | 0.837247 | # Generated by Django 3.2.6 on 2022-05-03 18:09 | 1.713853 | 2 |
0751-0800/0771-JewelsAndStones/JewelsAndStones.py | Sun-Zhen/leetcode | 3 | 6633141 | <gh_stars>1-10
# -*- coding:utf-8 -*-
"""
@author: Alden
@email: <EMAIL>
@date: 2018/3/30
@version: 1.0.0.0
"""
class Solution(object):
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
res = 0
target_list = list()
for tmp in J:
target_list.append(tmp)
for tmp in S:
if tmp in target_list:
res += 1
return res
if __name__ == "__main__":
s = Solution()
s.numJewelsInStones("aA", "aAAbbbb")
s.numJewelsInStones("z", "ZZ")
| # -*- coding:utf-8 -*-
"""
@author: Alden
@email: <EMAIL>
@date: 2018/3/30
@version: 1.0.0.0
"""
class Solution(object):
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
res = 0
target_list = list()
for tmp in J:
target_list.append(tmp)
for tmp in S:
if tmp in target_list:
res += 1
return res
if __name__ == "__main__":
s = Solution()
s.numJewelsInStones("aA", "aAAbbbb")
s.numJewelsInStones("z", "ZZ") | en | 0.288929 | # -*- coding:utf-8 -*- @author: Alden @email: <EMAIL> @date: 2018/3/30 @version: 1.0.0.0 :type J: str :type S: str :rtype: int | 3.657952 | 4 |
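# For reference: the nested membership test above is O(len(J) * len(S)); hashing the jewels
# first makes it O(len(J) + len(S)). The function name below is illustrative.
def num_jewels_in_stones(J, S):
    jewels = set(J)
    return sum(stone in jewels for stone in S)

# num_jewels_in_stones("aA", "aAAbbbb") -> 3, num_jewels_in_stones("z", "ZZ") -> 0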
sparrow_kms.py | suramrit/sparrow | 15 | 6633142 | <filename>sparrow_kms.py<gh_stars>10-100
#!/usr/bin/env python
import boto3
import base64
import random
import json
from twython import Twython
# Credentials setup
# Loads in 'creds.json' values as a dictionary
with open('creds.json') as f:
credentials = json.loads(f.read())
def decrypt(ciphertext):
"""Decrypt ciphertext with KMS"""
kms = boto3.client('kms')
print 'Decrypting ciphertext with KMS'
plaintext = kms.decrypt(CiphertextBlob = base64.b64decode(ciphertext))['Plaintext']
return plaintext
# Decrypts API keys and sets config values from the config file
# Make sure this is loading KMS encrypted values in creds.json
# or else you may see a TypeError: Incorrect padding error
CONSUMER_KEY = decrypt(credentials["consumer_key"])
CONSUMER_SECRET = decrypt(credentials["consumer_secret"])
ACCESS_TOKEN_KEY = decrypt(credentials["access_token_key"])
ACCESS_TOKEN_SECRET = decrypt(credentials["access_token_secret"])
# Create the Twython Twitter client using our credentials
twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET,
ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
# Sample random tweets
potential_tweets = [
'This is my first tweet with Sparrow by @fmc_sea - https://github.com/fernando-mc/sparrow',
'Wow! Isn\'t Sparrow by @fmc_sea just the coolest! https://github.com/fernando-mc/sparrow',
'Jeez! Everyone should learn about AWS Lambda and Twitter Bots from @fmc_sea'
]
def send_tweet(tweet_text):
"""Sends a tweet to Twitter"""
twitter.update_status(status = tweet_text)
def handler(event,context):
"""Sends random tweet from list of potential tweets"""
send_tweet(random.choice(potential_tweets))
| <filename>sparrow_kms.py<gh_stars>10-100
#!/usr/bin/env python
import boto3
import base64
import random
import json
from twython import Twython
# Credentials setup
# Loads in 'creds.json' values as a dictionary
with open('creds.json') as f:
credentials = json.loads(f.read())
def decrypt(ciphertext):
"""Decrypt ciphertext with KMS"""
kms = boto3.client('kms')
print 'Decrypting ciphertext with KMS'
plaintext = kms.decrypt(CiphertextBlob = base64.b64decode(ciphertext))['Plaintext']
return plaintext
# Decrypts API keys and sets config values from the config file
# Make sure this is loading KMS encrypted values in creds.json
# or else you may see a TypeError: Incorrect padding error
CONSUMER_KEY = decrypt(credentials["consumer_key"])
CONSUMER_SECRET = decrypt(credentials["consumer_secret"])
ACCESS_TOKEN_KEY = decrypt(credentials["access_token_key"])
ACCESS_TOKEN_SECRET = decrypt(credentials["access_token_secret"])
# Create the Twython Twitter client using our credentials
twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET,
ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
# Sample random tweets
potential_tweets = [
'This is my first tweet with Sparrow by @fmc_sea - https://github.com/fernando-mc/sparrow',
'Wow! Isn\'t Sparrow by @fmc_sea just the coolest! https://github.com/fernando-mc/sparrow',
'Jeez! Everyone should learn about AWS Lambda and Twitter Bots from @fmc_sea'
]
def send_tweet(tweet_text):
"""Sends a tweet to Twitter"""
twitter.update_status(status = tweet_text)
def handler(event,context):
"""Sends random tweet from list of potential tweets"""
send_tweet(random.choice(potential_tweets))
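# For reference: the creds.json values consumed above are base64-encoded KMS ciphertexts.
# A one-off sketch of how they could be produced; the key alias is a placeholder, not a
# real resource.
import base64 as _b64
import boto3 as _boto3

def encrypt_for_creds(plaintext, key_id='alias/sparrow-bot'):
    kms = _boto3.client('kms')
    blob = kms.encrypt(KeyId=key_id, Plaintext=plaintext.encode())['CiphertextBlob']
    return _b64.b64encode(blob).decode()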
| en | 0.601325 | #!/usr/bin/env python # Credentials setup # Loads in 'creds.json' values as a dictionary Decrypt ciphertext with KMS # Decrypts API keys and sets config values from the config file # Make sure this is loading KMS encrypted values in creds.json # or else you may see a TypeError: Incorrect padding error # Create the Twython Twitter client using our credentials # Sample random tweets Sends a tweet to Twitter Sends random tweet from list of potential tweets | 2.790725 | 3 |
0000_students_work/2021tro/projection_local_gaussian.py | takuya-ki/wrs | 23 | 6633143 | <filename>0000_students_work/2021tro/projection_local_gaussian.py
import numpy as np
import modeling.geometric_model as gm
import modeling.collision_model as cm
import visualization.panda.world as wd
import basis.robot_math as rm
import math
from scipy.spatial import cKDTree
import vision.depth_camera.surface.gaussian_surface as gs
base = wd.World(cam_pos=np.array([-.3,-.9,.3]), lookat_pos=np.array([0,0,0]))
# gm.gen_frame().attach_to(base)
bowl_model = cm.CollisionModel(initor="./objects/bowl.stl")
bowl_model.set_rgba([.3,.3,.3,.3])
bowl_model.set_rotmat(rm.rotmat_from_euler(math.pi,0,0))
bowl_model.attach_to(base)
pn_direction = np.array([0, 0, -1])
bowl_samples, bowl_sample_normals = bowl_model.sample_surface(toggle_option='normals', radius=.002)
selection = bowl_sample_normals.dot(-pn_direction)>.1
bowl_samples = bowl_samples[selection]
bowl_sample_normals=bowl_sample_normals[selection]
tree = cKDTree(bowl_samples)
pt_direction = rm.orthogonal_vector(pn_direction, toggle_unit=True)
tmp_direction = np.cross(pn_direction, pt_direction)
plane_rotmat = np.column_stack((pt_direction, tmp_direction, pn_direction))
homomat=np.eye(4)
homomat[:3,:3] = plane_rotmat
homomat[:3,3] = np.array([-.07,-.03,.1])
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[1,1,1,.3])
twod_plane.attach_to(base)
circle_radius=.05
line_segs = [[homomat[:3,3], homomat[:3,3]+pt_direction*.05], [homomat[:3,3]+pt_direction*.05, homomat[:3,3]+pt_direction*.05+tmp_direction*.05],
[homomat[:3,3]+pt_direction*.05+tmp_direction*.05, homomat[:3,3]+tmp_direction*.05], [homomat[:3,3]+tmp_direction*.05, homomat[:3,3]]]
# gm.gen_linesegs(line_segs).attach_to(base)
for sec in line_segs:
gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)
epos = (line_segs[0][1]-line_segs[0][0])*.7+line_segs[0][0]
gm.gen_arrow(spos=line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)
spt = homomat[:3,3]
# gm.gen_stick(spt, spt + pn_direction * 10, rgba=[0,1,0,1]).attach_to(base)
# base.run()
gm.gen_dasharrow(spt, spt-pn_direction*.07, thickness=.004).attach_to(base) # p0
cpt, cnrml = bowl_model.ray_hit(spt, spt + pn_direction * 10000, option='closest')
gm.gen_dashstick(spt, cpt, rgba=[.57,.57,.57,.7], thickness=0.003).attach_to(base)
gm.gen_sphere(pos=cpt, radius=.005).attach_to(base)
gm.gen_dasharrow(cpt, cpt-pn_direction*.07, thickness=.004).attach_to(base) # p0
gm.gen_dasharrow(cpt, cpt+cnrml*.07, thickness=.004).attach_to(base) # p0
angle = rm.angle_between_vectors(-pn_direction, cnrml)
vec = np.cross(-pn_direction, cnrml)
rotmat = rm.rotmat_from_axangle(vec, angle)
new_plane_homomat = np.eye(4)
new_plane_homomat[:3,:3] = rotmat.dot(homomat[:3,:3])
new_plane_homomat[:3,3] = cpt
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=new_plane_homomat, rgba=[1,1,1,.3])
twod_plane.attach_to(base)
new_line_segs = [[cpt, cpt+rotmat.dot(pt_direction)*.05],
[cpt+rotmat.dot(pt_direction)*.05, cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05],
[cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05, cpt+rotmat.dot(tmp_direction)*.05],
[cpt+rotmat.dot(tmp_direction)*.05, cpt]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
# for sec in [new_line_segs[0]]:
# gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)
epos = (new_line_segs[0][1]-new_line_segs[0][0])*.7+new_line_segs[0][0]
gm.gen_arrow(spos=new_line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)
t_cpt = cpt
last_normal = cnrml
direction = rotmat.dot(pt_direction)
tmp_direction = rotmat.dot(tmp_direction)
n=5
for tick in range(1, n+1):
t_npt = cpt+direction*.05/n
gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
nearby_sample_ids = tree.query_ball_point(t_npt, .005)
nearby_samples = bowl_samples[nearby_sample_ids]
gm.GeometricModel(nearby_samples).attach_to(base)
plane_center, plane_normal = rm.fit_plane(nearby_samples)
plane_tangential = rm.orthogonal_vector(plane_normal)
plane_tmp = np.cross(plane_normal, plane_tangential)
plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples-plane_center).T).T
surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:,2], n_mix=1)
t_npt_on_xy = plane_rotmat.T.dot(t_npt-plane_center)
projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
projected_point = plane_rotmat.dot(projected_t_npt_on_xy)+plane_center
surface_gm = surface.get_gometricmodel([[-.05,.05],[-.05,.05]], rgba=[.5,.7,1,.1])
surface_gm.set_pos(plane_center)
surface_gm.set_rotmat(plane_rotmat)
surface_gm.attach_to(base)
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# twod_plane = gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.2]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
new_normal = rm.unit_vector(t_npt-projected_point)
if pn_direction.dot(new_normal) > .1:
new_normal = -new_normal
gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.025, thickness=0.001).attach_to(base)
angle = rm.angle_between_vectors(last_normal, new_normal)
vec = rm.unit_vector(np.cross(last_normal, new_normal))
new_rotmat = rm.rotmat_from_axangle(vec, angle)
direction = new_rotmat.dot(direction)
tmp_direction = new_rotmat.dot(tmp_direction)
# new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
cpt=projected_point
last_normal = new_normal
# break
t_cpt = cpt
direction = new_rotmat.dot(tmp_direction)
for tick in range(1, n+1):
t_npt = cpt+direction*.05/n
gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
nearby_sample_ids = tree.query_ball_point(t_npt, .005)
nearby_samples = bowl_samples[nearby_sample_ids]
gm.GeometricModel(nearby_samples).attach_to(base)
plane_center, plane_normal = rm.fit_plane(nearby_samples)
plane_tangential = rm.orthogonal_vector(plane_normal)
plane_tmp = np.cross(plane_normal, plane_tangential)
plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples-plane_center).T).T
surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:,2], n_mix=1)
t_npt_on_xy = plane_rotmat.T.dot(t_npt-plane_center)
projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
projected_point = plane_rotmat.dot(projected_t_npt_on_xy)+plane_center
surface_gm = surface.get_gometricmodel([[-.05,.05],[-.05,.05]], rgba=[.5,.7,1,.1])
surface_gm.set_pos(plane_center)
surface_gm.set_rotmat(plane_rotmat)
surface_gm.attach_to(base)
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # if tick == 5:
# gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
new_normal = rm.unit_vector(t_npt-projected_point)
if pn_direction.dot(new_normal) > .1:
new_normal = -new_normal
gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.025, thickness=0.001).attach_to(base)
angle = rm.angle_between_vectors(last_normal, new_normal)
vec = rm.unit_vector(np.cross(last_normal, new_normal))
new_rotmat = rm.rotmat_from_axangle(vec, angle)
# direction = new_rotmat.dot(direction)
direction = new_rotmat.dot(tmp_direction)
# new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
cpt=projected_point
last_normal = new_normal
# break
#
# t_cpt = cpt
# direction = new_rotmat.dot(-pt_direction)
# for tick in range(1, n+1):
# t_npt = cpt+direction*.05/n
# # gm.gen_arrow(spos=cpt, epos=t_npt, thickness=0.001, rgba=[0,1,1,1]).attach_to(base)
# # gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1,1,0,1]).attach_to(base)
# nearby_sample_ids = tree.query_ball_point(t_npt, .0015)
# nearby_samples = bowl_samples[nearby_sample_ids]
# # gm.GeometricModel(nearby_samples).attach_to(base)
# plane_center, plane_normal = rm.fit_plane(nearby_samples)
# plane_tangential = rm.orthogonal_vector(plane_normal)
# plane_tmp = np.cross(plane_normal, plane_tangential)
# plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
# new_normal = rm.unit_vector(t_npt-projected_point)
# if pn_direction.dot(new_normal) > .1:
# new_normal = -new_normal
# # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base)
# angle = rm.angle_between_vectors(last_normal, new_normal)
# vec = rm.unit_vector(np.cross(last_normal, new_normal))
# new_rotmat = rm.rotmat_from_axangle(vec, angle)
# # direction = new_rotmat.dot(direction)
# direction = new_rotmat.dot(-pt_direction)
# # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# # gm.gen_linesegs(new_line_segs).attach_to(base)
# gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
# cpt=projected_point
# last_normal = new_normal
# # if tick ==2:
# # break
#
# t_cpt = cpt
# direction = new_rotmat.dot(-tmp_direction)
# for tick in range(1, n+1):
# t_npt = cpt+direction*.05/n
# # gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
# nearby_sample_ids = tree.query_ball_point(t_npt, .0015)
# nearby_samples = bowl_samples[nearby_sample_ids]
# # gm.GeometricModel(nearby_samples).attach_to(base)
# plane_center, plane_normal = rm.fit_plane(nearby_samples)
# plane_tangential = rm.orthogonal_vector(plane_normal)
# plane_tmp = np.cross(plane_normal, plane_tangential)
# plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.3]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
# new_normal = rm.unit_vector(t_npt-projected_point)
# if pn_direction.dot(new_normal) > .1:
# new_normal = -new_normal
# # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base)
# angle = rm.angle_between_vectors(last_normal, new_normal)
# vec = rm.unit_vector(np.cross(last_normal, new_normal))
# new_rotmat = rm.rotmat_from_axangle(vec, angle)
# # direction = new_rotmat.dot(direction)
# direction = new_rotmat.dot(-tmp_direction)
# # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# # gm.gen_linesegs(new_line_segs).attach_to(base)
# gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
# cpt=projected_point
# last_normal = new_normal
# # break
base.run()
| <filename>0000_students_work/2021tro/projection_local_gaussian.py
import numpy as np
import modeling.geometric_model as gm
import modeling.collision_model as cm
import visualization.panda.world as wd
import basis.robot_math as rm
import math
from scipy.spatial import cKDTree
import vision.depth_camera.surface.gaussian_surface as gs
base = wd.World(cam_pos=np.array([-.3,-.9,.3]), lookat_pos=np.array([0,0,0]))
# gm.gen_frame().attach_to(base)
bowl_model = cm.CollisionModel(initor="./objects/bowl.stl")
bowl_model.set_rgba([.3,.3,.3,.3])
bowl_model.set_rotmat(rm.rotmat_from_euler(math.pi,0,0))
bowl_model.attach_to(base)
pn_direction = np.array([0, 0, -1])
bowl_samples, bowl_sample_normals = bowl_model.sample_surface(toggle_option='normals', radius=.002)
selection = bowl_sample_normals.dot(-pn_direction)>.1
bowl_samples = bowl_samples[selection]
bowl_sample_normals=bowl_sample_normals[selection]
tree = cKDTree(bowl_samples)
pt_direction = rm.orthogonal_vector(pn_direction, toggle_unit=True)
tmp_direction = np.cross(pn_direction, pt_direction)
plane_rotmat = np.column_stack((pt_direction, tmp_direction, pn_direction))
homomat=np.eye(4)
homomat[:3,:3] = plane_rotmat
homomat[:3,3] = np.array([-.07,-.03,.1])
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[1,1,1,.3])
twod_plane.attach_to(base)
circle_radius=.05
line_segs = [[homomat[:3,3], homomat[:3,3]+pt_direction*.05], [homomat[:3,3]+pt_direction*.05, homomat[:3,3]+pt_direction*.05+tmp_direction*.05],
[homomat[:3,3]+pt_direction*.05+tmp_direction*.05, homomat[:3,3]+tmp_direction*.05], [homomat[:3,3]+tmp_direction*.05, homomat[:3,3]]]
# gm.gen_linesegs(line_segs).attach_to(base)
for sec in line_segs:
gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)
epos = (line_segs[0][1]-line_segs[0][0])*.7+line_segs[0][0]
gm.gen_arrow(spos=line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)
spt = homomat[:3,3]
# gm.gen_stick(spt, spt + pn_direction * 10, rgba=[0,1,0,1]).attach_to(base)
# base.run()
gm.gen_dasharrow(spt, spt-pn_direction*.07, thickness=.004).attach_to(base) # p0
cpt, cnrml = bowl_model.ray_hit(spt, spt + pn_direction * 10000, option='closest')
gm.gen_dashstick(spt, cpt, rgba=[.57,.57,.57,.7], thickness=0.003).attach_to(base)
gm.gen_sphere(pos=cpt, radius=.005).attach_to(base)
gm.gen_dasharrow(cpt, cpt-pn_direction*.07, thickness=.004).attach_to(base) # p0
gm.gen_dasharrow(cpt, cpt+cnrml*.07, thickness=.004).attach_to(base) # p0
angle = rm.angle_between_vectors(-pn_direction, cnrml)
vec = np.cross(-pn_direction, cnrml)
rotmat = rm.rotmat_from_axangle(vec, angle)
new_plane_homomat = np.eye(4)
new_plane_homomat[:3,:3] = rotmat.dot(homomat[:3,:3])
new_plane_homomat[:3,3] = cpt
twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=new_plane_homomat, rgba=[1,1,1,.3])
twod_plane.attach_to(base)
new_line_segs = [[cpt, cpt+rotmat.dot(pt_direction)*.05],
[cpt+rotmat.dot(pt_direction)*.05, cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05],
[cpt+rotmat.dot(pt_direction)*.05+rotmat.dot(tmp_direction)*.05, cpt+rotmat.dot(tmp_direction)*.05],
[cpt+rotmat.dot(tmp_direction)*.05, cpt]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
# for sec in [new_line_segs[0]]:
# gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base)
epos = (new_line_segs[0][1]-new_line_segs[0][0])*.7+new_line_segs[0][0]
gm.gen_arrow(spos=new_line_segs[0][0], epos=epos, thickness=0.004).attach_to(base)
t_cpt = cpt
last_normal = cnrml
direction = rotmat.dot(pt_direction)
tmp_direction = rotmat.dot(tmp_direction)
n=5
for tick in range(1, n+1):
t_npt = cpt+direction*.05/n
gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
nearby_sample_ids = tree.query_ball_point(t_npt, .005)
nearby_samples = bowl_samples[nearby_sample_ids]
gm.GeometricModel(nearby_samples).attach_to(base)
plane_center, plane_normal = rm.fit_plane(nearby_samples)
plane_tangential = rm.orthogonal_vector(plane_normal)
plane_tmp = np.cross(plane_normal, plane_tangential)
plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples-plane_center).T).T
surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:,2], n_mix=1)
t_npt_on_xy = plane_rotmat.T.dot(t_npt-plane_center)
projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
projected_point = plane_rotmat.dot(projected_t_npt_on_xy)+plane_center
surface_gm = surface.get_gometricmodel([[-.05,.05],[-.05,.05]], rgba=[.5,.7,1,.1])
surface_gm.set_pos(plane_center)
surface_gm.set_rotmat(plane_rotmat)
surface_gm.attach_to(base)
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# twod_plane = gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.2]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
new_normal = rm.unit_vector(t_npt-projected_point)
if pn_direction.dot(new_normal) > .1:
new_normal = -new_normal
gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.025, thickness=0.001).attach_to(base)
angle = rm.angle_between_vectors(last_normal, new_normal)
vec = rm.unit_vector(np.cross(last_normal, new_normal))
new_rotmat = rm.rotmat_from_axangle(vec, angle)
direction = new_rotmat.dot(direction)
tmp_direction = new_rotmat.dot(tmp_direction)
# new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
cpt=projected_point
last_normal = new_normal
# break
t_cpt = cpt
direction = new_rotmat.dot(tmp_direction)
for tick in range(1, n+1):
t_npt = cpt+direction*.05/n
gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.025, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
nearby_sample_ids = tree.query_ball_point(t_npt, .005)
nearby_samples = bowl_samples[nearby_sample_ids]
gm.GeometricModel(nearby_samples).attach_to(base)
plane_center, plane_normal = rm.fit_plane(nearby_samples)
plane_tangential = rm.orthogonal_vector(plane_normal)
plane_tmp = np.cross(plane_normal, plane_tangential)
plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
nearby_samples_on_xy = plane_rotmat.T.dot((nearby_samples-plane_center).T).T
surface = gs.MixedGaussianSurface(nearby_samples_on_xy[:, :2], nearby_samples_on_xy[:,2], n_mix=1)
t_npt_on_xy = plane_rotmat.T.dot(t_npt-plane_center)
projected_t_npt_z_on_xy = surface.get_zdata(np.array([t_npt_on_xy[:2]]))
projected_t_npt_on_xy = np.array([t_npt_on_xy[0], t_npt_on_xy[1], projected_t_npt_z_on_xy[0]])
projected_point = plane_rotmat.dot(projected_t_npt_on_xy)+plane_center
surface_gm = surface.get_gometricmodel([[-.05,.05],[-.05,.05]], rgba=[.5,.7,1,.1])
surface_gm.set_pos(plane_center)
surface_gm.set_rotmat(plane_rotmat)
surface_gm.attach_to(base)
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # if tick == 5:
# gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
new_normal = rm.unit_vector(t_npt-projected_point)
if pn_direction.dot(new_normal) > .1:
new_normal = -new_normal
gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.025, thickness=0.001).attach_to(base)
angle = rm.angle_between_vectors(last_normal, new_normal)
vec = rm.unit_vector(np.cross(last_normal, new_normal))
new_rotmat = rm.rotmat_from_axangle(vec, angle)
# direction = new_rotmat.dot(direction)
direction = new_rotmat.dot(tmp_direction)
# new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# gm.gen_linesegs(new_line_segs).attach_to(base)
gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
cpt=projected_point
last_normal = new_normal
# break
#
# t_cpt = cpt
# direction = new_rotmat.dot(-pt_direction)
# for tick in range(1, n+1):
# t_npt = cpt+direction*.05/n
# # gm.gen_arrow(spos=cpt, epos=t_npt, thickness=0.001, rgba=[0,1,1,1]).attach_to(base)
# # gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1,1,0,1]).attach_to(base)
# nearby_sample_ids = tree.query_ball_point(t_npt, .0015)
# nearby_samples = bowl_samples[nearby_sample_ids]
# # gm.GeometricModel(nearby_samples).attach_to(base)
# plane_center, plane_normal = rm.fit_plane(nearby_samples)
# plane_tangential = rm.orthogonal_vector(plane_normal)
# plane_tmp = np.cross(plane_normal, plane_tangential)
# plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
# new_normal = rm.unit_vector(t_npt-projected_point)
# if pn_direction.dot(new_normal) > .1:
# new_normal = -new_normal
# # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base)
# angle = rm.angle_between_vectors(last_normal, new_normal)
# vec = rm.unit_vector(np.cross(last_normal, new_normal))
# new_rotmat = rm.rotmat_from_axangle(vec, angle)
# # direction = new_rotmat.dot(direction)
# direction = new_rotmat.dot(-pt_direction)
# # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# # gm.gen_linesegs(new_line_segs).attach_to(base)
# gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
# cpt=projected_point
# last_normal = new_normal
# # if tick ==2:
# # break
#
# t_cpt = cpt
# direction = new_rotmat.dot(-tmp_direction)
# for tick in range(1, n+1):
# t_npt = cpt+direction*.05/n
# # gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base)
# nearby_sample_ids = tree.query_ball_point(t_npt, .0015)
# nearby_samples = bowl_samples[nearby_sample_ids]
# # gm.GeometricModel(nearby_samples).attach_to(base)
# plane_center, plane_normal = rm.fit_plane(nearby_samples)
# plane_tangential = rm.orthogonal_vector(plane_normal)
# plane_tmp = np.cross(plane_normal, plane_tangential)
# plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal))
# homomat = np.eye(4)
# homomat[:3,:3]=plane_rotmat
# homomat[:3,3]=plane_center
# # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.3]).attach_to(base)
# projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal)
# # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base)
# new_normal = rm.unit_vector(t_npt-projected_point)
# if pn_direction.dot(new_normal) > .1:
# new_normal = -new_normal
# # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base)
# angle = rm.angle_between_vectors(last_normal, new_normal)
# vec = rm.unit_vector(np.cross(last_normal, new_normal))
# new_rotmat = rm.rotmat_from_axangle(vec, angle)
# # direction = new_rotmat.dot(direction)
# direction = new_rotmat.dot(-tmp_direction)
# # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)],
# # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]]
# # gm.gen_linesegs(new_line_segs).attach_to(base)
# gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base)
# cpt=projected_point
# last_normal = new_normal
# # break
base.run()
| en | 0.44875 | # gm.gen_frame().attach_to(base) # gm.gen_linesegs(line_segs).attach_to(base) # gm.gen_stick(spt, spt + pn_direction * 10, rgba=[0,1,0,1]).attach_to(base) # base.run() # p0 # p0 # p0 # gm.gen_linesegs(new_line_segs).attach_to(base) # for sec in [new_line_segs[0]]: # gm.gen_stick(spos=sec[0], epos=sec[1], rgba=[0, 0, 0, 1], thickness=.002, type='round').attach_to(base) # homomat = np.eye(4) # homomat[:3,:3]=plane_rotmat # homomat[:3,3]=plane_center # twod_plane = gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.2]).attach_to(base) # projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal) # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base) # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)], # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]] # gm.gen_linesegs(new_line_segs).attach_to(base) # break # homomat = np.eye(4) # homomat[:3,:3]=plane_rotmat # homomat[:3,3]=plane_center # # if tick == 5: # gm.gen_box(np.array([.1, .1, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base) # projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal) # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base) # direction = new_rotmat.dot(direction) # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)], # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]] # gm.gen_linesegs(new_line_segs).attach_to(base) # break # # t_cpt = cpt # direction = new_rotmat.dot(-pt_direction) # for tick in range(1, n+1): # t_npt = cpt+direction*.05/n # # gm.gen_arrow(spos=cpt, epos=t_npt, thickness=0.001, rgba=[0,1,1,1]).attach_to(base) # # gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1,1,0,1]).attach_to(base) # nearby_sample_ids = tree.query_ball_point(t_npt, .0015) # nearby_samples = bowl_samples[nearby_sample_ids] # # gm.GeometricModel(nearby_samples).attach_to(base) # plane_center, plane_normal = rm.fit_plane(nearby_samples) # plane_tangential = rm.orthogonal_vector(plane_normal) # plane_tmp = np.cross(plane_normal, plane_tangential) # plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal)) # homomat = np.eye(4) # homomat[:3,:3]=plane_rotmat # homomat[:3,3]=plane_center # # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.1]).attach_to(base) # projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal) # # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base) # new_normal = rm.unit_vector(t_npt-projected_point) # if pn_direction.dot(new_normal) > .1: # new_normal = -new_normal # # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base) # angle = rm.angle_between_vectors(last_normal, new_normal) # vec = rm.unit_vector(np.cross(last_normal, new_normal)) # new_rotmat = rm.rotmat_from_axangle(vec, angle) # # direction = new_rotmat.dot(direction) # direction = new_rotmat.dot(-pt_direction) # # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)], # # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]] # # gm.gen_linesegs(new_line_segs).attach_to(base) # gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base) # cpt=projected_point # last_normal = new_normal # # if tick ==2: # # break # # t_cpt = cpt # direction = new_rotmat.dot(-tmp_direction) # for tick in range(1, n+1): # t_npt = cpt+direction*.05/n # # 
gm.gen_arrow(spos=t_npt, epos=t_npt+last_normal*.015, thickness=0.001, rgba=[1, 1, 0, 1]).attach_to(base) # nearby_sample_ids = tree.query_ball_point(t_npt, .0015) # nearby_samples = bowl_samples[nearby_sample_ids] # # gm.GeometricModel(nearby_samples).attach_to(base) # plane_center, plane_normal = rm.fit_plane(nearby_samples) # plane_tangential = rm.orthogonal_vector(plane_normal) # plane_tmp = np.cross(plane_normal, plane_tangential) # plane_rotmat = np.column_stack((plane_tangential, plane_tmp, plane_normal)) # homomat = np.eye(4) # homomat[:3,:3]=plane_rotmat # homomat[:3,3]=plane_center # # twod_plane = gm.gen_box(np.array([.2, .2, .001]), homomat=homomat, rgba=[.5,.7,1,.3]).attach_to(base) # projected_point = rm.project_to_plane(t_npt, plane_center, plane_normal) # # gm.gen_stick(t_npt, projected_point, thickness=.002).attach_to(base) # new_normal = rm.unit_vector(t_npt-projected_point) # if pn_direction.dot(new_normal) > .1: # new_normal = -new_normal # # gm.gen_arrow(spos=projected_point, epos=projected_point+new_normal*.015, thickness=0.001).attach_to(base) # angle = rm.angle_between_vectors(last_normal, new_normal) # vec = rm.unit_vector(np.cross(last_normal, new_normal)) # new_rotmat = rm.rotmat_from_axangle(vec, angle) # # direction = new_rotmat.dot(direction) # direction = new_rotmat.dot(-tmp_direction) # # new_line_segs = [[cpt, cpt+direction*(.05-tick*.05/n)], # # [cpt+direction*(.05-tick*.05/n), cpt+direction*(.05-tick*.05/n)+tmp_direction*.05]] # # gm.gen_linesegs(new_line_segs).attach_to(base) # gm.gen_stick(spos=cpt, epos=projected_point, rgba=[1,.6,0,1], thickness=.002, type='round').attach_to(base) # cpt=projected_point # last_normal = new_normal # # break | 2.08612 | 2 |
test/ext_rex.py | mawentao007/reading_grab | 0 | 6633144 | <gh_stars>0
# coding: utf-8
import re
from weblib.error import DataNotFound
import six
from test.util import build_grab
from test.util import BaseGrabTestCase
HTML = u"""
<head>
<title>фыва</title>
<meta http-equiv="Content-Type" content="text/html; charset=cp1251" />
</head>
<body>
<div id="bee">
<div class="wrapper">
# russian LA
<strong id="bee-strong">пче</strong><em id="bee-em">ла</em>
</div>
<script type="text/javascript">
mozilla = 777;
</script>
<style type="text/css">
body { color: green; }
</style>
</div>
<div id="fly">
# russian XA
<strong id="fly-strong">му\n</strong><em id="fly-em">ха</em>
</div>
<ul id="num">
<li id="num-1">item #100 2</li>
<li id="num-2">item #2</li>
</ul>
""".encode('cp1251')
class ExtensionRexTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
# Create fake grab instance with fake response
self.g = build_grab()
self.g.fake_response(HTML, charset='cp1251')
def test_rex(self):
# Search unicode rex in unicode body - default case
rex = re.compile(u'(фыва)', re.U)
self.assertEqual(u'фыва', self.g.rex(rex).group(1))
# Search non-unicode rex in byte-string body
rex = re.compile(u'(фыва)'.encode('cp1251'))
self.assertEqual(u'фыва'.encode('cp1251'),
self.g.rex(rex, byte=True).group(1))
# # Search for non-unicode rex in unicode body should fail
pattern = '(фыва)'
# py3 hack
if six.PY3:
pattern = pattern.encode('utf-8')
rex = re.compile(pattern)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex))
        # # Search for unicode rex in byte-string body should fail
rex = re.compile(u'фыва', re.U)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex, byte=True))
        # # Search for a nonexistent fragment
rex = re.compile(u'(фыва2)', re.U)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex))
def test_assert_rex(self):
self.g.assert_rex(re.compile(u'фыва'))
self.g.assert_rex(re.compile(u'фыва'.encode('cp1251')), byte=True)
def test_assert_rex_text(self):
self.assertEqual(u'ха', self.g.rex_text('<em id="fly-em">([^<]+)'))
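# --- Hedged illustration (not part of the original test file) ---
# The behaviour exercised above through grab's rex()/byte= flag boils down to a
# plain `re` rule: a unicode pattern only matches unicode text, and an encoded
# (byte) pattern only matches a body encoded the same way.
def _rex_encoding_demo():
    text = u'пчела и муха'
    raw = text.encode('cp1251')
    hits = (
        re.search(u'пчела', text) is not None,                   # unicode vs unicode: True
        re.search(u'пчела'.encode('cp1251'), raw) is not None,   # cp1251 vs cp1251: True
        re.search(u'пчела'.encode('utf-8'), raw) is not None,    # mismatched encodings: False
    )
    return hits  # (True, True, False)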
| # coding: utf-8
import re
from weblib.error import DataNotFound
import six
from test.util import build_grab
from test.util import BaseGrabTestCase
HTML = u"""
<head>
<title>фыва</title>
<meta http-equiv="Content-Type" content="text/html; charset=cp1251" />
</head>
<body>
<div id="bee">
<div class="wrapper">
# russian LA
<strong id="bee-strong">пче</strong><em id="bee-em">ла</em>
</div>
<script type="text/javascript">
mozilla = 777;
</script>
<style type="text/css">
body { color: green; }
</style>
</div>
<div id="fly">
# russian XA
<strong id="fly-strong">му\n</strong><em id="fly-em">ха</em>
</div>
<ul id="num">
<li id="num-1">item #100 2</li>
<li id="num-2">item #2</li>
</ul>
""".encode('cp1251')
class ExtensionRexTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
# Create fake grab instance with fake response
self.g = build_grab()
self.g.fake_response(HTML, charset='cp1251')
def test_rex(self):
# Search unicode rex in unicode body - default case
rex = re.compile(u'(фыва)', re.U)
self.assertEqual(u'фыва', self.g.rex(rex).group(1))
# Search non-unicode rex in byte-string body
rex = re.compile(u'(фыва)'.encode('cp1251'))
self.assertEqual(u'фыва'.encode('cp1251'),
self.g.rex(rex, byte=True).group(1))
# # Search for non-unicode rex in unicode body should fail
pattern = '(фыва)'
# py3 hack
if six.PY3:
pattern = pattern.encode('utf-8')
rex = re.compile(pattern)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex))
        # # Search for unicode rex in byte-string body should fail
rex = re.compile(u'фыва', re.U)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex, byte=True))
        # # Search for a nonexistent fragment
rex = re.compile(u'(фыва2)', re.U)
self.assertRaises(DataNotFound, lambda: self.g.rex(rex))
def test_assert_rex(self):
self.g.assert_rex(re.compile(u'фыва'))
self.g.assert_rex(re.compile(u'фыва'.encode('cp1251')), byte=True)
def test_assert_rex_text(self):
self.assertEqual(u'ха', self.g.rex_text('<em id="fly-em">([^<]+)')) | en | 0.307757 | # coding: utf-8 <head> <title>фыва</title> <meta http-equiv="Content-Type" content="text/html; charset=cp1251" /> </head> <body> <div id="bee"> <div class="wrapper"> # russian LA <strong id="bee-strong">пче</strong><em id="bee-em">ла</em> </div> <script type="text/javascript"> mozilla = 777; </script> <style type="text/css"> body { color: green; } </style> </div> <div id="fly"> # russian XA <strong id="fly-strong">му\n</strong><em id="fly-em">ха</em> </div> <ul id="num"> <li id="num-1">item #100 2</li> <li id="num-2">item #2</li> </ul> # Create fake grab instance with fake response # Search unicode rex in unicode body - default case # Search non-unicode rex in byte-string body # # Search for non-unicode rex in unicode body should fail # py3 hack # # Search for unicode rex in byte-string body shuld fail # # Search for unexesting fragment | 2.365368 | 2 |
tests/inlineasm/asmblbx.py | sebastien-riou/micropython | 13,648 | 6633145 | <filename>tests/inlineasm/asmblbx.py
# test bl and bx instructions
@micropython.asm_thumb
def f(r0):
# jump over the internal functions
b(entry)
label(func1)
add(r0, 2)
bx(lr)
label(func2)
sub(r0, 1)
bx(lr)
label(entry)
bl(func1)
bl(func2)
print(f(0))
print(f(1))
| <filename>tests/inlineasm/asmblbx.py
# test bl and bx instructions
@micropython.asm_thumb
def f(r0):
# jump over the internal functions
b(entry)
label(func1)
add(r0, 2)
bx(lr)
label(func2)
sub(r0, 1)
bx(lr)
label(entry)
bl(func1)
bl(func2)
print(f(0))
print(f(1))
| en | 0.654669 | # test bl and bx instructions # jump over the internal functions | 2.289775 | 2 |
gfftk/compare.py | nextgenusfs/gfftk | 0 | 6633146 | <filename>gfftk/compare.py<gh_stars>0
import sys
from collections import defaultdict, OrderedDict
from natsort import natsorted
from itertools import product
import numpy as np
from .utils import zopen
from .gff import gff2dict
from .interlap import InterLap
from .consensus import getAED
def compare(args):
compareAnnotations(args.reference, args.query, args.fasta, output=args.out)
def gff2interlap(input, fasta):
"""
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
"""
inter = defaultdict(InterLap)
Genes = gff2dict(input, fasta)
for k, v in natsorted(list(Genes.items())):
inter[v["contig"]].add((v["location"][0], v["location"][1], k))
return inter, Genes
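# --- Hedged usage sketch (toy values; assumes the vendored InterLap exposes the
# --- same add()/find() API that gff2interlap and compareAnnotations rely on) ---
def _interlap_query_demo():
    toy = defaultdict(InterLap)
    toy["contig_1"].add((1000, 2000, "gene_a"))  # (start, end, gene_id), as stored above
    toy["contig_1"].add((1500, 3000, "gene_b"))
    # find() yields every stored interval overlapping the queried range
    return [hit[2] for hit in toy["contig_1"].find((1800, 1900))]  # -> ['gene_a', 'gene_b']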
def countFeatures(input):
# given funannotate dictionary, count up some general features
mRNAg, mRNAt, tRNAg, tRNAt = (0,) * 4
for k, v in natsorted(list(input.items())):
if v["type"] == "mRNA":
mRNAg += 1
mRNAt += len(v["ids"])
elif v["type"] == "tRNA":
tRNAg += 1
tRNAt += len(v["ids"])
return len(input), mRNAg, mRNAt, tRNAg, tRNAt
def pairwiseAED(query, reference):
"""
    takes multiple transcripts and sums the AED from the lowest pairwise comparison, then
    calculates the average based on the number of transcripts in the query
"""
AEDsum = []
pAED = [float(getAED(a, b)) for a, b in product(query, reference)]
# split into parts to get lowest AED
splitAED = [pAED[i : i + len(query)] for i in range(0, len(pAED), len(query))]
for pair in splitAED:
AEDsum.append(min(pair))
AEDavg = sum(AEDsum) / len(query)
return "{:.3f}".format(AEDavg)
def compareAnnotations(old, new, fasta, output=False):
"""
    takes two GFF-annotated genomes and compares their gene models
    output is a TSV file with one row per locus describing what differs
    can handle multiple transcripts per locus
"""
if output:
out = zopen(output, mode="w")
else:
out = sys.stdout
result = {}
global no_change, identicalCDS, refUnique, queryUnique
no_change, identicalCDS, refUnique, queryUnique, totalmatches, totallength = (
0,
) * 6
oldInter, oldGenes = gff2interlap(old, fasta)
NumOldLoci, NumOldGenes, NumOldmRNA, NumOldtRNALoci, NumOldtRNA = countFeatures(
oldGenes
)
sys.stderr.write(
"Reference: {} contigs {} genes\n".format(len(oldInter), len(oldGenes))
)
sys.stderr.write(
"{}\n".format([NumOldLoci, NumOldGenes, NumOldmRNA, NumOldtRNALoci, NumOldtRNA])
)
newInter, newGenes = gff2interlap(new, fasta)
sys.stderr.write(
"Query: {} contigs {} genes\n".format(len(newInter), len(newGenes))
)
NumNewLoci, NumNewGenes, NumNewmRNA, NumNewtRNALoci, NumNewtRNA = countFeatures(
newGenes
)
sys.stderr.write(
"{}\n".format([NumNewLoci, NumNewGenes, NumNewmRNA, NumNewtRNALoci, NumNewtRNA])
)
# now go through the updated annotation, comparing to old annot
ref_seen = set()
for contig in newInter:
for gene in newInter[contig]:
# means this is a new model, so add it
hitList = list(oldInter[contig].find((gene[0], gene[1])))
if len(hitList) < 1:
result[gene[2]] = {
"contig": newGenes[gene[2]]["contig"],
"location": newGenes[gene[2]]["location"],
"ref_type": None,
"ref_location": None,
"query_location": newGenes[gene[2]]["location"],
"query_id": gene[2],
"query_type": newGenes[gene[2]]["mRNA"],
"ref_id": None,
"cdsAED": "1.000",
"exonAED": "1.000",
"ref_transcripts": 0,
"query_transcripts": len(newGenes[gene[2]]["ids"]),
"ref_strand": None,
"query_strand": newGenes[gene[2]]["strand"],
}
continue
# there might be some overlapping transcripts, so get best hit?
hit = []
# get best hit
for z in hitList:
diffs = np.subtract((gene[0], gene[1]), (z[0], z[1]))
totaldiffs = abs(diffs[0]) + abs(diffs[1])
hit.append((totaldiffs, z[2]))
besthit = min(hit)
# get the old annotation
hitInfo = oldGenes.get(besthit[1])
ref_seen.add(besthit[1])
# calculate AED
exonAED = pairwiseAED(newGenes[gene[2]]["mRNA"], hitInfo["mRNA"])
if "mRNA" in newGenes[gene[2]]["type"] and "mRNA" in hitInfo["type"]:
cdsAED = pairwiseAED(newGenes[gene[2]]["CDS"], hitInfo["CDS"])
else:
cdsAED = "0.000"
if not besthit[1] in result:
result[besthit[1]] = {
"contig": newGenes[gene[2]]["contig"],
"location": hitInfo["location"],
"ref_type": hitInfo["type"],
"ref_location": hitInfo["location"],
"query_location": newGenes[gene[2]]["location"],
"query_id": gene[2],
"query_type": newGenes[gene[2]]["type"],
"cdsAED": cdsAED,
"exonAED": exonAED,
"ref_transcripts": len(hitInfo["ids"]),
"query_transcripts": len(newGenes[gene[2]]["ids"]),
"ref_strand": hitInfo["strand"],
"query_strand": newGenes[gene[2]]["strand"],
"ref_id": besthit[1],
}
# get some summary stats as you loop through
if float(exonAED) == 0 and float(cdsAED) == 0:
no_change += 1
elif float(cdsAED) == 0:
identicalCDS += 1
# now add old genes that did not have overlaps
for contig in oldInter:
for gene in oldInter[contig]:
if not gene[2] in ref_seen:
result[gene[2]] = {
"contig": oldGenes[gene[2]]["contig"],
"location": oldGenes[gene[2]]["location"],
"ref_type": oldGenes[gene[2]]["type"][0],
"ref_location": oldGenes[gene[2]]["location"],
"query_location": None,
"query_id": None,
"query_type": None,
"ref_id": gene[2],
"cdsAED": "1.000",
"exonAED": "1.000",
"ref_transcripts": len(oldGenes[gene[2]]["ids"]),
"query_transcripts": 0,
"ref_strand": oldGenes[gene[2]]["strand"],
"query_strand": None,
}
total_cdsAED = []
total_exonAED = []
def _sortDict(d):
return (d[1]["contig"], d[1]["location"][0])
# sort the annotations by contig and start location
sGenes = natsorted(iter(result.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
out.write(
"Reference_Location\tReference_ID\tRef_strand\tRef_Num_Transcripts\tQuery_Location\tQuery_ID\tQuery_strand\tQuery_Num_Transcripts\tmRNA_AED\tCDS_AED\n"
)
for k, v in list(sortedGenes.items()):
Rstart = str(v["location"][0])
Rend = str(v["location"][1])
if v["query_id"]:
Qstart = str(v["query_location"][0])
Qend = str(v["query_location"][1])
else:
Qstart = "None"
Qend = "None"
total_cdsAED.append(float(v["cdsAED"]))
total_exonAED.append(float(v["exonAED"]))
out.write(
"{:}:{:}-{:}\t{:}\t{:}\t{:}\t{:}:{:}-{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n".format(
v["contig"],
Rstart,
Rend,
v["ref_id"],
v["ref_strand"],
v["ref_transcripts"],
v["contig"],
Qstart,
Qend,
v["query_id"],
v["query_strand"],
v["query_transcripts"],
v["exonAED"],
v["cdsAED"],
)
)
Avg_cdsAED = sum(total_cdsAED) / float(len(total_cdsAED))
Avg_exonAED = sum(total_exonAED) / float(len(total_exonAED))
totalPident = 0.00
if totalmatches > 0:
totalPident = totalmatches / totallength
if output:
out.close()
return [
NumOldLoci,
NumOldGenes,
NumOldmRNA,
NumOldtRNALoci,
NumOldtRNA,
refUnique,
no_change,
identicalCDS,
0.000,
0.000,
1,
NumNewLoci,
NumNewGenes,
NumNewmRNA,
NumNewtRNALoci,
NumNewtRNA,
queryUnique,
no_change,
identicalCDS,
Avg_exonAED,
Avg_cdsAED,
totalPident,
]
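# --- Hedged usage sketch (placeholder file names, nothing is executed here) ---
# compare() above is the CLI entry point; it only needs the reference GFF3, the
# query GFF3, the genome FASTA and an optional TSV destination.
from argparse import Namespace as _Namespace

def _compare_demo():
    args = _Namespace(reference="reference.gff3", query="query.gff3",
                      fasta="genome.fasta", out="comparison.tsv")
    # compare(args) would parse both annotations and write the per-locus TSV;
    # compareAnnotations() itself returns the summary list of feature counts and
    # average AED values. The placeholder files are not shipped with the package.
    return args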
| <filename>gfftk/compare.py<gh_stars>0
import sys
from collections import defaultdict, OrderedDict
from natsort import natsorted
from itertools import product
import numpy as np
from .utils import zopen
from .gff import gff2dict
from .interlap import InterLap
from .consensus import getAED
def compare(args):
compareAnnotations(args.reference, args.query, args.fasta, output=args.out)
def gff2interlap(input, fasta):
"""
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
"""
inter = defaultdict(InterLap)
Genes = gff2dict(input, fasta)
for k, v in natsorted(list(Genes.items())):
inter[v["contig"]].add((v["location"][0], v["location"][1], k))
return inter, Genes
def countFeatures(input):
# given funannotate dictionary, count up some general features
mRNAg, mRNAt, tRNAg, tRNAt = (0,) * 4
for k, v in natsorted(list(input.items())):
if v["type"] == "mRNA":
mRNAg += 1
mRNAt += len(v["ids"])
elif v["type"] == "tRNA":
tRNAg += 1
tRNAt += len(v["ids"])
return len(input), mRNAg, mRNAt, tRNAg, tRNAt
def pairwiseAED(query, reference):
"""
    takes multiple transcripts and sums the AED from the lowest pairwise comparison, then
    calculates the average based on the number of transcripts in the query
"""
AEDsum = []
pAED = [float(getAED(a, b)) for a, b in product(query, reference)]
# split into parts to get lowest AED
splitAED = [pAED[i : i + len(query)] for i in range(0, len(pAED), len(query))]
for pair in splitAED:
AEDsum.append(min(pair))
AEDavg = sum(AEDsum) / len(query)
return "{:.3f}".format(AEDavg)
def compareAnnotations(old, new, fasta, output=False):
"""
    takes two GFF-annotated genomes and compares their gene models
    output is a TSV file with one row per locus describing what differs
    can handle multiple transcripts per locus
"""
if output:
out = zopen(output, mode="w")
else:
out = sys.stdout
result = {}
global no_change, identicalCDS, refUnique, queryUnique
no_change, identicalCDS, refUnique, queryUnique, totalmatches, totallength = (
0,
) * 6
oldInter, oldGenes = gff2interlap(old, fasta)
NumOldLoci, NumOldGenes, NumOldmRNA, NumOldtRNALoci, NumOldtRNA = countFeatures(
oldGenes
)
sys.stderr.write(
"Reference: {} contigs {} genes\n".format(len(oldInter), len(oldGenes))
)
sys.stderr.write(
"{}\n".format([NumOldLoci, NumOldGenes, NumOldmRNA, NumOldtRNALoci, NumOldtRNA])
)
newInter, newGenes = gff2interlap(new, fasta)
sys.stderr.write(
"Query: {} contigs {} genes\n".format(len(newInter), len(newGenes))
)
NumNewLoci, NumNewGenes, NumNewmRNA, NumNewtRNALoci, NumNewtRNA = countFeatures(
newGenes
)
sys.stderr.write(
"{}\n".format([NumNewLoci, NumNewGenes, NumNewmRNA, NumNewtRNALoci, NumNewtRNA])
)
# now go through the updated annotation, comparing to old annot
ref_seen = set()
for contig in newInter:
for gene in newInter[contig]:
# means this is a new model, so add it
hitList = list(oldInter[contig].find((gene[0], gene[1])))
if len(hitList) < 1:
result[gene[2]] = {
"contig": newGenes[gene[2]]["contig"],
"location": newGenes[gene[2]]["location"],
"ref_type": None,
"ref_location": None,
"query_location": newGenes[gene[2]]["location"],
"query_id": gene[2],
"query_type": newGenes[gene[2]]["mRNA"],
"ref_id": None,
"cdsAED": "1.000",
"exonAED": "1.000",
"ref_transcripts": 0,
"query_transcripts": len(newGenes[gene[2]]["ids"]),
"ref_strand": None,
"query_strand": newGenes[gene[2]]["strand"],
}
continue
# there might be some overlapping transcripts, so get best hit?
hit = []
# get best hit
for z in hitList:
diffs = np.subtract((gene[0], gene[1]), (z[0], z[1]))
totaldiffs = abs(diffs[0]) + abs(diffs[1])
hit.append((totaldiffs, z[2]))
besthit = min(hit)
# get the old annotation
hitInfo = oldGenes.get(besthit[1])
ref_seen.add(besthit[1])
# calculate AED
exonAED = pairwiseAED(newGenes[gene[2]]["mRNA"], hitInfo["mRNA"])
if "mRNA" in newGenes[gene[2]]["type"] and "mRNA" in hitInfo["type"]:
cdsAED = pairwiseAED(newGenes[gene[2]]["CDS"], hitInfo["CDS"])
else:
cdsAED = "0.000"
if not besthit[1] in result:
result[besthit[1]] = {
"contig": newGenes[gene[2]]["contig"],
"location": hitInfo["location"],
"ref_type": hitInfo["type"],
"ref_location": hitInfo["location"],
"query_location": newGenes[gene[2]]["location"],
"query_id": gene[2],
"query_type": newGenes[gene[2]]["type"],
"cdsAED": cdsAED,
"exonAED": exonAED,
"ref_transcripts": len(hitInfo["ids"]),
"query_transcripts": len(newGenes[gene[2]]["ids"]),
"ref_strand": hitInfo["strand"],
"query_strand": newGenes[gene[2]]["strand"],
"ref_id": besthit[1],
}
# get some summary stats as you loop through
if float(exonAED) == 0 and float(cdsAED) == 0:
no_change += 1
elif float(cdsAED) == 0:
identicalCDS += 1
# now add old genes that did not have overlaps
for contig in oldInter:
for gene in oldInter[contig]:
if not gene[2] in ref_seen:
result[gene[2]] = {
"contig": oldGenes[gene[2]]["contig"],
"location": oldGenes[gene[2]]["location"],
"ref_type": oldGenes[gene[2]]["type"][0],
"ref_location": oldGenes[gene[2]]["location"],
"query_location": None,
"query_id": None,
"query_type": None,
"ref_id": gene[2],
"cdsAED": "1.000",
"exonAED": "1.000",
"ref_transcripts": len(oldGenes[gene[2]]["ids"]),
"query_transcripts": 0,
"ref_strand": oldGenes[gene[2]]["strand"],
"query_strand": None,
}
total_cdsAED = []
total_exonAED = []
def _sortDict(d):
return (d[1]["contig"], d[1]["location"][0])
# sort the annotations by contig and start location
sGenes = natsorted(iter(result.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
out.write(
"Reference_Location\tReference_ID\tRef_strand\tRef_Num_Transcripts\tQuery_Location\tQuery_ID\tQuery_strand\tQuery_Num_Transcripts\tmRNA_AED\tCDS_AED\n"
)
for k, v in list(sortedGenes.items()):
Rstart = str(v["location"][0])
Rend = str(v["location"][1])
if v["query_id"]:
Qstart = str(v["query_location"][0])
Qend = str(v["query_location"][1])
else:
Qstart = "None"
Qend = "None"
total_cdsAED.append(float(v["cdsAED"]))
total_exonAED.append(float(v["exonAED"]))
out.write(
"{:}:{:}-{:}\t{:}\t{:}\t{:}\t{:}:{:}-{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n".format(
v["contig"],
Rstart,
Rend,
v["ref_id"],
v["ref_strand"],
v["ref_transcripts"],
v["contig"],
Qstart,
Qend,
v["query_id"],
v["query_strand"],
v["query_transcripts"],
v["exonAED"],
v["cdsAED"],
)
)
Avg_cdsAED = sum(total_cdsAED) / float(len(total_cdsAED))
Avg_exonAED = sum(total_exonAED) / float(len(total_exonAED))
totalPident = 0.00
if totalmatches > 0:
totalPident = totalmatches / totallength
if output:
out.close()
return [
NumOldLoci,
NumOldGenes,
NumOldmRNA,
NumOldtRNALoci,
NumOldtRNA,
refUnique,
no_change,
identicalCDS,
0.000,
0.000,
1,
NumNewLoci,
NumNewGenes,
NumNewmRNA,
NumNewtRNALoci,
NumNewtRNA,
queryUnique,
no_change,
identicalCDS,
Avg_exonAED,
Avg_cdsAED,
totalPident,
]
| en | 0.893692 | function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary # given funannotate dictionary, count up some general features takes a multiple transcripts and sums AED from lowest pairwise comparison and then calculates the average based on number of transcripts in the query # split into parts to get lowest AED function takes two GFF annotated genomes and compares gene models output is a tsv file for each locus and a description of what is different can handle multiple transcripts per locus # now go through the updated annotation, comparing to old annot # means this is a new model, so add it # there might be some overlapping transcripts, so get best hit? # get best hit # get the old annotation # calculate AED # get some summary stats as you loop through # now add old genes that did not have overlaps # sort the annotations by contig and start location | 2.576159 | 3 |
api/app.py | UUDigitalHumanitieslab/historic-hebrew-dates | 1 | 6633147 | import os
import sys
import glob
import csv
import json
import traceback
from flask import Flask, jsonify, request
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from historic_hebrew_dates import create_parsers
app = Flask(__name__)
def pattern_path(lang, type):
path = os.path.join('historic_hebrew_dates', 'patterns', f'{lang}_{type}.csv')
if not os.path.exists(path):
with open(path, "w+", encoding='utf8') as f:
f.write("type,pattern,value\n")
return path
@app.route("/api/patterns", methods=['GET'])
def overview():
languages = {}
for lang in glob.glob(os.path.join('historic_hebrew_dates', 'patterns', f'*.json')):
key = os.path.splitext(os.path.basename(lang))[0]
with open(lang, encoding='utf-8-sig') as description:
languages[key] = json.load(description)
return jsonify(languages)
@app.route("/api/patterns/<lang>/<type>", methods=['GET'])
def get(lang, type):
with open(pattern_path(lang, type), encoding='utf-8-sig') as patterns:
reader = csv.reader(patterns)
return jsonify(list(reader))
@app.route("/api/patterns/<lang>/<type>", methods=['PUT'])
def put(lang, type):
data = request.get_json()
rows = data['rows']
with open(pattern_path(lang, type), mode='w', encoding='utf-8-sig') as patterns:
for row in rows:
patterns.write(
','.join(map(lambda cell: f'"{cell}"' if ',' in cell else cell, row)) + '\n')
return jsonify({'success': True})
@app.route("/api/parse/<lang>/<type>", methods=['POST'])
def parse(lang, type):
data = request.get_json()
input = data['input']
rows = data['rows']
parser = create_parsers(lang, override_rows={
type: rows
})[type]
failure = False
try:
expression = parser.parse(input)
except Exception as error:
expression = str(error)
failure = True
else:
        if expression is None:
evaluated = None
failure = True
else:
try:
evaluated = str(parser.parse(input, True))
except Exception as error:
evaluated = str(error)
failure = True
return jsonify({'expression': expression, 'evaluated': evaluated, 'error': failure})
@app.route("/api/search/<lang>/<type>", methods=['POST'])
def search(lang, type):
data = request.get_json()
input = data['input']
rows = data['rows']
parser = create_parsers(lang, override_rows={
type: rows
})[type]
failure = False
try:
result = [escape_search(item) for item in list(parser.search(input))]
except Exception as error:
result = str(error)
print(traceback.format_exc())
failure = True
return jsonify({'result': result, 'error': failure})
def escape_search(item):
if 'eval' in item:
item['eval'] = str(item['eval'])
return item
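# --- Hedged client sketch (not part of the API itself; requires a running
# --- server and the `requests` package, and the lang/type values below are
# --- placeholders, not guaranteed dataset names) ---
def _client_demo(base_url="http://localhost:5000", lang="hebrew", type_="dates"):
    import requests
    languages = requests.get("{}/api/patterns".format(base_url)).json()
    rows = requests.get("{}/api/patterns/{}/{}".format(base_url, lang, type_)).json()
    parsed = requests.post("{}/api/parse/{}/{}".format(base_url, lang, type_),
                           json={"input": "text to parse", "rows": rows}).json()
    return languages, parsed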
| import os
import sys
import glob
import csv
import json
import traceback
from flask import Flask, jsonify, request
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from historic_hebrew_dates import create_parsers
app = Flask(__name__)
def pattern_path(lang, type):
path = os.path.join('historic_hebrew_dates', 'patterns', f'{lang}_{type}.csv')
if not os.path.exists(path):
with open(path, "w+", encoding='utf8') as f:
f.write("type,pattern,value\n")
return path
@app.route("/api/patterns", methods=['GET'])
def overview():
languages = {}
for lang in glob.glob(os.path.join('historic_hebrew_dates', 'patterns', f'*.json')):
key = os.path.splitext(os.path.basename(lang))[0]
with open(lang, encoding='utf-8-sig') as description:
languages[key] = json.load(description)
return jsonify(languages)
@app.route("/api/patterns/<lang>/<type>", methods=['GET'])
def get(lang, type):
with open(pattern_path(lang, type), encoding='utf-8-sig') as patterns:
reader = csv.reader(patterns)
return jsonify(list(reader))
@app.route("/api/patterns/<lang>/<type>", methods=['PUT'])
def put(lang, type):
data = request.get_json()
rows = data['rows']
with open(pattern_path(lang, type), mode='w', encoding='utf-8-sig') as patterns:
for row in rows:
patterns.write(
','.join(map(lambda cell: f'"{cell}"' if ',' in cell else cell, row)) + '\n')
return jsonify({'success': True})
@app.route("/api/parse/<lang>/<type>", methods=['POST'])
def parse(lang, type):
data = request.get_json()
input = data['input']
rows = data['rows']
parser = create_parsers(lang, override_rows={
type: rows
})[type]
failure = False
try:
expression = parser.parse(input)
except Exception as error:
expression = str(error)
failure = True
else:
        if expression is None:
evaluated = None
failure = True
else:
try:
evaluated = str(parser.parse(input, True))
except Exception as error:
evaluated = str(error)
failure = True
return jsonify({'expression': expression, 'evaluated': evaluated, 'error': failure})
@app.route("/api/search/<lang>/<type>", methods=['POST'])
def search(lang, type):
data = request.get_json()
input = data['input']
rows = data['rows']
parser = create_parsers(lang, override_rows={
type: rows
})[type]
failure = False
try:
result = [escape_search(item) for item in list(parser.search(input))]
except Exception as error:
result = str(error)
print(traceback.format_exc())
failure = True
return jsonify({'result': result, 'error': failure})
def escape_search(item):
if 'eval' in item:
item['eval'] = str(item['eval'])
return item
| none | 1 | 2.923128 | 3 |
|
ThirdAndFourthStage/retweet_collection.py | kamplus/FakeNewsSetGen | 1 | 6633148 | <gh_stars>1-10
import json
import logging
from twython import TwythonError, TwythonRateLimitError
from tweet_collection import Tweet
from util.TwythonConnector import TwythonConnector
from util.util import create_dir, Config, multiprocess_data_collection
from util.util import DataCollector
from util import Constants
def dump_retweets_job(tweet: Tweet, config: Config, twython_connector: TwythonConnector):
retweets = []
connection = None
try:
connection = twython_connector.get_twython_connection("get_retweet")
retweets = connection.get_retweets(id=tweet.tweet_id, count=100, cursor=-1)
except TwythonRateLimitError:
logging.exception("Twython API rate limit exception - tweet id : {}".format(tweet.tweet_id))
except Exception:
logging.exception(
"Exception in getting retweets for tweet id %d using connection %s" % (tweet.tweet_id, connection))
retweet_obj = {"retweets": retweets}
dump_dir = "{}/{}/{}/{}".format(config.dump_location, tweet.news_source, tweet.label, tweet.news_id)
retweet_dir = "{}/retweets".format(dump_dir)
create_dir(dump_dir)
create_dir(retweet_dir)
json.dump(retweet_obj, open("{}/{}.json".format(retweet_dir, tweet.tweet_id), "w"))
def collect_retweets(news_list, news_source, label, config: Config):
create_dir(config.dump_location)
create_dir("{}/{}".format(config.dump_location, news_source))
create_dir("{}/{}/{}".format(config.dump_location, news_source, label))
save_dir = "{}/{}/{}".format(config.dump_location, news_source, label)
tweet_id_list = []
for news in news_list:
for tweet_id in news.tweet_ids:
tweet_id_list.append(Tweet(tweet_id, news.news_id, news_source, label))
multiprocess_data_collection(dump_retweets_job, tweet_id_list, (config, config.twython_connector), config)
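# --- Hedged helper (not used by the collector itself): mirrors the directory
# --- layout that dump_retweets_job() writes, which is handy when reading the
# --- dumped files back; news_source/label are whatever the config choices supply.
def retweet_dump_path(dump_location, news_source, label, news_id, tweet_id):
    return "{}/{}/{}/{}/retweets/{}.json".format(
        dump_location, news_source, label, news_id, tweet_id)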
class RetweetCollector(DataCollector):
def __init__(self, config):
super(RetweetCollector, self).__init__(config)
def collect_data(self, choices):
for choice in choices:
news_list = self.load_news_file(choice)
collect_retweets(news_list, choice["news_source"], choice["label"], self.config)
| import json
import logging
from twython import TwythonError, TwythonRateLimitError
from tweet_collection import Tweet
from util.TwythonConnector import TwythonConnector
from util.util import create_dir, Config, multiprocess_data_collection
from util.util import DataCollector
from util import Constants
def dump_retweets_job(tweet: Tweet, config: Config, twython_connector: TwythonConnector):
retweets = []
connection = None
try:
connection = twython_connector.get_twython_connection("get_retweet")
retweets = connection.get_retweets(id=tweet.tweet_id, count=100, cursor=-1)
except TwythonRateLimitError:
logging.exception("Twython API rate limit exception - tweet id : {}".format(tweet.tweet_id))
except Exception:
logging.exception(
"Exception in getting retweets for tweet id %d using connection %s" % (tweet.tweet_id, connection))
retweet_obj = {"retweets": retweets}
dump_dir = "{}/{}/{}/{}".format(config.dump_location, tweet.news_source, tweet.label, tweet.news_id)
retweet_dir = "{}/retweets".format(dump_dir)
create_dir(dump_dir)
create_dir(retweet_dir)
json.dump(retweet_obj, open("{}/{}.json".format(retweet_dir, tweet.tweet_id), "w"))
def collect_retweets(news_list, news_source, label, config: Config):
create_dir(config.dump_location)
create_dir("{}/{}".format(config.dump_location, news_source))
create_dir("{}/{}/{}".format(config.dump_location, news_source, label))
save_dir = "{}/{}/{}".format(config.dump_location, news_source, label)
tweet_id_list = []
for news in news_list:
for tweet_id in news.tweet_ids:
tweet_id_list.append(Tweet(tweet_id, news.news_id, news_source, label))
multiprocess_data_collection(dump_retweets_job, tweet_id_list, (config, config.twython_connector), config)
class RetweetCollector(DataCollector):
def __init__(self, config):
super(RetweetCollector, self).__init__(config)
def collect_data(self, choices):
for choice in choices:
news_list = self.load_news_file(choice)
collect_retweets(news_list, choice["news_source"], choice["label"], self.config) | none | 1 | 2.362298 | 2 |
|
moldesign/utils/callsigs.py | Autodesk/molecular-design-toolkit | 147 | 6633149 | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import os
from functools import wraps
import collections
import funcsigs
from .utils import if_not_none
from .docparsers import GoogleDocArgumentInjector
def args_from(original_function,
only=None,
allexcept=None,
inject_kwargs=None,
inject_docs=None,
wraps=None,
update_docstring_args=False):
"""
Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls
Args:
original_function (callable): the function to take the call signature from
only (List[str]): only transfer these arguments (incompatible with `allexcept`)
wraps (bool): Transfer documentation and attributes from original_function to
decorated_function, using functools.wraps (default: True if call signature is
unchanged, False otherwise)
allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
inject_kwargs (dict): Inject new kwargs into the call signature
(of the form ``{argname: defaultvalue}``)
inject_docs (dict): Add or modifies argument documentation (requires google-style
docstrings) with a dict of the form `{argname: "(type): description"}`
update_docstring_args (bool): Update "arguments" section of the docstring using the
original function's documentation (requires google-style docstrings and wraps=False)
Note:
To use arguments from a classes' __init__ method, pass the class itself as
``original_function`` - this will also allow us to inject the documentation
Returns:
Decorator function
"""
# NEWFEATURE - verify arguments?
if only and allexcept:
raise ValueError('Error in keyword arguments - '
'pass *either* "only" or "allexcept", not both')
origname = get_qualified_name(original_function)
if hasattr(original_function, '__signature__'):
sig = original_function.__signature__.replace()
else:
sig = funcsigs.signature(original_function)
# Modify the call signature if necessary
if only or allexcept or inject_kwargs:
wraps = if_not_none(wraps, False)
newparams = []
if only:
for param in only:
newparams.append(sig.parameters[param])
elif allexcept:
for name, param in sig.parameters.items():
if name not in allexcept:
newparams.append(param)
else:
newparams = list(sig.parameters.values())
if inject_kwargs:
for name, default in inject_kwargs.items():
newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
default=default)
newparams.append(newp)
newparams.sort(key=lambda param: param._kind)
sig = sig.replace(parameters=newparams)
else:
wraps = if_not_none(wraps, True)
# Get the docstring arguments
if update_docstring_args:
original_docs = GoogleDocArgumentInjector(original_function.__doc__)
argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
for p in newparams)
def decorator(f):
"""Modify f's call signature (using the `__signature__` attribute)"""
if wraps:
fname = original_function.__name__
f = functools.wraps(original_function)(f)
f.__name__ = fname # revert name change
else:
fname = f.__name__
f.__signature__ = sig
if update_docstring_args or inject_kwargs:
if not update_docstring_args:
argument_docstrings = GoogleDocArgumentInjector(f.__doc__).args
docs = GoogleDocArgumentInjector(f.__doc__)
docs.args = argument_docstrings
if not hasattr(f, '__orig_docs'):
f.__orig_docs = []
f.__orig_docs.append(f.__doc__)
f.__doc__ = docs.new_docstring()
# Only for building sphinx documentation:
if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
sigstring = '%s%s\n' % (fname, sig)
if hasattr(f, '__doc__') and f.__doc__ is not None:
f.__doc__ = sigstring + f.__doc__
else:
f.__doc__ = sigstring
return f
return decorator
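# --- Hedged usage sketch (the two callables below are illustrative only, not
# --- part of the toolkit) ---
def _example_reference(url, timeout=10.0, retries=3):
    """Placeholder callable used only to demonstrate args_from."""
    return url, timeout, retries

@args_from(_example_reference, allexcept=['retries'])
def _example_wrapper(*args, **kwargs):
    return _example_reference(*args, **kwargs)

# funcsigs.signature(_example_wrapper) now reports (url, timeout=10.0)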
def kwargs_from(reference_function, mod_docs=True):
""" Replaces ``**kwargs`` in a call signature with keyword arguments from another function.
Args:
reference_function (function): function to get kwargs from
mod_docs (bool): whether to modify the decorated function's docstring
Note:
``mod_docs`` works ONLY for google-style docstrings
"""
refsig = funcsigs.signature(reference_function)
origname = get_qualified_name(reference_function)
kwparams = []
for name, param in refsig.parameters.items():
if param.default != param.empty or param.kind in (param.VAR_KEYWORD, param.KEYWORD_ONLY):
if param.name[0] != '_':
kwparams.append(param)
if mod_docs:
refdocs = GoogleDocArgumentInjector(reference_function.__doc__)
def decorator(f):
sig = funcsigs.signature(f)
fparams = []
found_varkeyword = None
for name, param in sig.parameters.items():
if param.kind == param.VAR_KEYWORD:
fparams.extend(kwparams)
found_varkeyword = name
else:
fparams.append(param)
if not found_varkeyword:
raise TypeError("Function has no **kwargs wildcard.")
f.__signature__ = sig.replace(parameters=fparams)
if mod_docs:
docs = GoogleDocArgumentInjector(f.__doc__)
new_args = collections.OrderedDict()
for argname, doc in docs.args.items():
if argname == found_varkeyword:
for param in kwparams:
default_argdoc = '%s: argument for %s' % (param.name, origname)
new_args[param.name] = refdocs.args.get(param.name, default_argdoc)
else:
new_args[argname] = doc
docs.args = new_args
if not hasattr(f, '__orig_docs'):
f.__orig_docs = []
f.__orig_docs.append(f.__doc__)
f.__doc__ = docs.new_docstring()
return f
return decorator
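# --- Hedged usage sketch (illustrative callables only, not part of the toolkit) ---
def _example_configure(host, port=8080, debug=False):
    """Placeholder callable used only to demonstrate kwargs_from."""
    return host, port, debug

@kwargs_from(_example_configure, mod_docs=False)
def _example_start(name, **kwargs):
    return name, _example_configure('localhost', **kwargs)

# funcsigs.signature(_example_start) now reports (name, port=8080, debug=False)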
def get_qualified_name(original_function):
if inspect.ismethod(original_function):
origname = '.'.join([original_function.__module__,
original_function.__self__.__class__.__name__,
original_function.__name__])
return ':meth:`%s`' % origname
else:
origname = original_function.__module__+'.'+original_function.__name__
return ':meth:`%s`' % origname
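# --- Hedged example (standard-library callable chosen purely for illustration) ---
def _qualified_name_demo():
    import json
    return get_qualified_name(json.dumps)  # ':meth:`json.dumps`'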
class DocInherit(object):
"""
Allows methods to inherit docstrings from their superclasses
FROM http://code.activestate.com/recipes/576862/
"""
def __init__(self, mthd):
self.mthd = mthd
self.name = mthd.__name__
def __get__(self, obj, cls):
if obj:
return self.get_with_inst(obj, cls)
else:
return self.get_no_inst(cls)
def get_with_inst(self, obj, cls):
        overridden = getattr(super(cls, obj), self.name, None)
@wraps(self.mthd, assigned=('__name__','__module__'))
def f(*args, **kwargs):
return self.mthd(obj, *args, **kwargs)
return self.use_parent_doc(f, overridden)
def get_no_inst(self, cls):
for parent in cls.__mro__[1:]:
overridden = getattr(parent, self.name, None)
if overridden: break
@wraps(self.mthd, assigned=('__name__','__module__'))
def f(*args, **kwargs):
return self.mthd(*args, **kwargs)
return self.use_parent_doc(f, overridden)
def use_parent_doc(self, func, source):
if source is None:
raise NameError("Can't find '%s' in parents"%self.name)
func.__doc__ = source.__doc__
return func
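# --- Hedged usage sketch (example classes only; not part of the toolkit) ---
class _ExampleBase(object):
    def frobnicate(self):
        """Docstring that the overriding method should inherit."""
        return 0

class _ExampleChild(_ExampleBase):
    @DocInherit
    def frobnicate(self):
        return 1

# _ExampleChild.frobnicate.__doc__ == _ExampleBase.frobnicate.__doc__  -> True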
#idiomatic decorator name
doc_inherit = DocInherit | from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import os
from functools import wraps
import collections
import funcsigs
from .utils import if_not_none
from .docparsers import GoogleDocArgumentInjector
def args_from(original_function,
only=None,
allexcept=None,
inject_kwargs=None,
inject_docs=None,
wraps=None,
update_docstring_args=False):
"""
Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls
Args:
original_function (callable): the function to take the call signature from
only (List[str]): only transfer these arguments (incompatible with `allexcept`)
wraps (bool): Transfer documentation and attributes from original_function to
decorated_function, using functools.wraps (default: True if call signature is
unchanged, False otherwise)
allexcept (List[str]): transfer all except these arguments (incompatible with `only`)
inject_kwargs (dict): Inject new kwargs into the call signature
(of the form ``{argname: defaultvalue}``)
inject_docs (dict): Add or modifies argument documentation (requires google-style
docstrings) with a dict of the form `{argname: "(type): description"}`
update_docstring_args (bool): Update "arguments" section of the docstring using the
original function's documentation (requires google-style docstrings and wraps=False)
Note:
To use arguments from a classes' __init__ method, pass the class itself as
``original_function`` - this will also allow us to inject the documentation
Returns:
Decorator function
"""
# NEWFEATURE - verify arguments?
if only and allexcept:
raise ValueError('Error in keyword arguments - '
'pass *either* "only" or "allexcept", not both')
origname = get_qualified_name(original_function)
if hasattr(original_function, '__signature__'):
sig = original_function.__signature__.replace()
else:
sig = funcsigs.signature(original_function)
# Modify the call signature if necessary
if only or allexcept or inject_kwargs:
wraps = if_not_none(wraps, False)
newparams = []
if only:
for param in only:
newparams.append(sig.parameters[param])
elif allexcept:
for name, param in sig.parameters.items():
if name not in allexcept:
newparams.append(param)
else:
newparams = list(sig.parameters.values())
if inject_kwargs:
for name, default in inject_kwargs.items():
newp = funcsigs.Parameter(name, funcsigs.Parameter.POSITIONAL_OR_KEYWORD,
default=default)
newparams.append(newp)
newparams.sort(key=lambda param: param._kind)
sig = sig.replace(parameters=newparams)
else:
wraps = if_not_none(wraps, True)
# Get the docstring arguments
if update_docstring_args:
original_docs = GoogleDocArgumentInjector(original_function.__doc__)
argument_docstrings = collections.OrderedDict((p.name, original_docs.args[p.name])
for p in newparams)
def decorator(f):
"""Modify f's call signature (using the `__signature__` attribute)"""
if wraps:
fname = original_function.__name__
f = functools.wraps(original_function)(f)
f.__name__ = fname # revert name change
else:
fname = f.__name__
f.__signature__ = sig
if update_docstring_args or inject_kwargs:
if not update_docstring_args:
argument_docstrings = GoogleDocArgumentInjector(f.__doc__).args
docs = GoogleDocArgumentInjector(f.__doc__)
docs.args = argument_docstrings
if not hasattr(f, '__orig_docs'):
f.__orig_docs = []
f.__orig_docs.append(f.__doc__)
f.__doc__ = docs.new_docstring()
# Only for building sphinx documentation:
if os.environ.get('SPHINX_IS_BUILDING_DOCS', ""):
sigstring = '%s%s\n' % (fname, sig)
if hasattr(f, '__doc__') and f.__doc__ is not None:
f.__doc__ = sigstring + f.__doc__
else:
f.__doc__ = sigstring
return f
return decorator
def kwargs_from(reference_function, mod_docs=True):
""" Replaces ``**kwargs`` in a call signature with keyword arguments from another function.
Args:
reference_function (function): function to get kwargs from
mod_docs (bool): whether to modify the decorated function's docstring
Note:
``mod_docs`` works ONLY for google-style docstrings
"""
refsig = funcsigs.signature(reference_function)
origname = get_qualified_name(reference_function)
kwparams = []
for name, param in refsig.parameters.items():
if param.default != param.empty or param.kind in (param.VAR_KEYWORD, param.KEYWORD_ONLY):
if param.name[0] != '_':
kwparams.append(param)
if mod_docs:
refdocs = GoogleDocArgumentInjector(reference_function.__doc__)
def decorator(f):
sig = funcsigs.signature(f)
fparams = []
found_varkeyword = None
for name, param in sig.parameters.items():
if param.kind == param.VAR_KEYWORD:
fparams.extend(kwparams)
found_varkeyword = name
else:
fparams.append(param)
if not found_varkeyword:
raise TypeError("Function has no **kwargs wildcard.")
f.__signature__ = sig.replace(parameters=fparams)
if mod_docs:
docs = GoogleDocArgumentInjector(f.__doc__)
new_args = collections.OrderedDict()
for argname, doc in docs.args.items():
if argname == found_varkeyword:
for param in kwparams:
default_argdoc = '%s: argument for %s' % (param.name, origname)
new_args[param.name] = refdocs.args.get(param.name, default_argdoc)
else:
new_args[argname] = doc
docs.args = new_args
if not hasattr(f, '__orig_docs'):
f.__orig_docs = []
f.__orig_docs.append(f.__doc__)
f.__doc__ = docs.new_docstring()
return f
return decorator
def get_qualified_name(original_function):
if inspect.ismethod(original_function):
origname = '.'.join([original_function.__module__,
original_function.__self__.__class__.__name__,
original_function.__name__])
return ':meth:`%s`' % origname
else:
origname = original_function.__module__+'.'+original_function.__name__
return ':meth:`%s`' % origname
class DocInherit(object):
"""
Allows methods to inherit docstrings from their superclasses
FROM http://code.activestate.com/recipes/576862/
"""
def __init__(self, mthd):
self.mthd = mthd
self.name = mthd.__name__
def __get__(self, obj, cls):
if obj:
return self.get_with_inst(obj, cls)
else:
return self.get_no_inst(cls)
def get_with_inst(self, obj, cls):
        overridden = getattr(super(cls, obj), self.name, None)
@wraps(self.mthd, assigned=('__name__','__module__'))
def f(*args, **kwargs):
return self.mthd(obj, *args, **kwargs)
return self.use_parent_doc(f, overridden)
def get_no_inst(self, cls):
for parent in cls.__mro__[1:]:
overridden = getattr(parent, self.name, None)
if overridden: break
@wraps(self.mthd, assigned=('__name__','__module__'))
def f(*args, **kwargs):
return self.mthd(*args, **kwargs)
return self.use_parent_doc(f, overridden)
def use_parent_doc(self, func, source):
if source is None:
raise NameError("Can't find '%s' in parents"%self.name)
func.__doc__ = source.__doc__
return func
#idiomatic decorator name
doc_inherit = DocInherit | en | 0.669663 | # Copyright 2017 Autodesk Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Decorator to transfer call signatures - helps to hide ugly *args and **kwargs in delegated calls Args: original_function (callable): the function to take the call signature from only (List[str]): only transfer these arguments (incompatible with `allexcept`) wraps (bool): Transfer documentation and attributes from original_function to decorated_function, using functools.wraps (default: True if call signature is unchanged, False otherwise) allexcept (List[str]): transfer all except these arguments (incompatible with `only`) inject_kwargs (dict): Inject new kwargs into the call signature (of the form ``{argname: defaultvalue}``) inject_docs (dict): Add or modifies argument documentation (requires google-style docstrings) with a dict of the form `{argname: "(type): description"}` update_docstring_args (bool): Update "arguments" section of the docstring using the original function's documentation (requires google-style docstrings and wraps=False) Note: To use arguments from a classes' __init__ method, pass the class itself as ``original_function`` - this will also allow us to inject the documentation Returns: Decorator function # NEWFEATURE - verify arguments? # Modify the call signature if necessary # Get the docstring arguments Modify f's call signature (using the `__signature__` attribute) # revert name change # Only for building sphinx documentation: Replaces ``**kwargs`` in a call signature with keyword arguments from another function. Args: reference_function (function): function to get kwargs from mod_docs (bool): whether to modify the decorated function's docstring Note: ``mod_docs`` works ONLY for google-style docstrings Allows methods to inherit docstrings from their superclasses FROM http://code.activestate.com/recipes/576862/ #idiomatic decorator name | 1.958953 | 2 |
pyscripts/s13_pacman_reflector_hook.py | Trick-17/arch-installer | 9 | 6633150 | pacman_reflector_hook = """[Trigger]
Operation = Upgrade
Type = Package
Target = pacman-mirrorlist
[Action]
Description = Updating pacman-mirrorlist with reflector and removing pacnew...
When = PostTransaction
Depends = reflector
Exec = /usr/bin/env sh -c "reflector --latest 100 --sort rate --protocol https --save /etc/pacman.d/mirrorlist; if [[ -f /etc/pacman.d/mirrorlist.pacnew ]]; then rm /etc/pacman.d/mirrorlist.pacnew; fi"
"""
def configure_pacman_reflector_hook():
import os
os.makedirs('/mnt/etc/pacman.d/hooks')
with open('/mnt/etc/pacman.d/hooks/mirrorupgrade.hook', 'w+') as txt_file:
txt_file.write(pacman_reflector_hook) | pacman_reflector_hook = """[Trigger]
Operation = Upgrade
Type = Package
Target = pacman-mirrorlist
[Action]
Description = Updating pacman-mirrorlist with reflector and removing pacnew...
When = PostTransaction
Depends = reflector
Exec = /usr/bin/env sh -c "reflector --latest 100 --sort rate --protocol https --save /etc/pacman.d/mirrorlist; if [[ -f /etc/pacman.d/mirrorlist.pacnew ]]; then rm /etc/pacman.d/mirrorlist.pacnew; fi"
"""
def configure_pacman_reflector_hook():
import os
os.makedirs('/mnt/etc/pacman.d/hooks')
with open('/mnt/etc/pacman.d/hooks/mirrorupgrade.hook', 'w+') as txt_file:
txt_file.write(pacman_reflector_hook) | en | 0.518308 | [Trigger] Operation = Upgrade Type = Package Target = pacman-mirrorlist [Action] Description = Updating pacman-mirrorlist with reflector and removing pacnew... When = PostTransaction Depends = reflector Exec = /usr/bin/env sh -c "reflector --latest 100 --sort rate --protocol https --save /etc/pacman.d/mirrorlist; if [[ -f /etc/pacman.d/mirrorlist.pacnew ]]; then rm /etc/pacman.d/mirrorlist.pacnew; fi" | 2.072083 | 2 |