repo_name | path | copies | size | content | license
---|---|---|---|---|---
shadowleaves/acr | agg.py | 1 | 1719 | #!/usr/bin/env python
# import pandas as pd
# import subprocess
# from datetime import datetime
import os
# import csv
# import shlex
# import json
# import bson
import gzip
# from pytz import UTC
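# Join the path components, expand any environment variables, and create the directory if it does not already exist.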
def ensure_path(*p):
f = os.path.join(*p)
f = os.path.expandvars(f)
if not os.path.exists(f):
try:
os.makedirs(f)
except Exception:
pass
return(f)
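# Concatenate every gzipped BSON chunk under source_path/<sym> into a single <sym>.bson.gz in dest_path.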
def aggregate(sym, source_path, dest_path):
subpath = os.path.join(source_path, sym)
new_filepath = os.path.join(dest_path, '%s.bson.gz' % sym)
print 'aggregating %s ...' % sym
try:
with gzip.open(new_filepath, 'wb') as new_bson:
for file in os.listdir(subpath):
filepath = os.path.join(subpath, file)
with gzip.open(filepath, 'rb') as bson_file:
for line in bson_file.readlines():
new_bson.write(line)
except KeyboardInterrupt:
print '^C detected...'
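# Aggregate each symbol directory found under the exchange path in parallel using an 8-process pool.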
def main():
from multiprocessing import Pool
pool = Pool(processes=8)
# from multiprocessing.pool import ThreadPool
# pool = ThreadPool(processes=5)
exch = 'bbl1_equities_all'
path = os.path.expandvars('$HOME/data/bson/%s' % exch)
newpath = ensure_path('$HOME/Dropbox/intraday/bson/agg')
# newpath = ensure_path('/tmp/agg')
res = []
for sym in sorted(os.listdir(path)):
args = (sym, path, newpath)
# download_bars(*args)
result = pool.apply_async(aggregate, args=args)
res.append(result)
# retrieving results (blocks here until all tasks have finished)
[x.get() for x in res]
# print 'total tasks %d' % len(tmp)
if __name__ == '__main__':
main()
| mit |
mkuron/espresso | samples/lj-demo.py | 1 | 20362 | #
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import matplotlib
matplotlib.use('WXAgg')
import espressomd
espressomd.assert_features(["LENNARD_JONES"])
from espressomd import thermostat
from espressomd import visualization
import numpy as np
from matplotlib import pyplot
from threading import Thread
from traits.api import HasTraits, Button, Any, Range, List, Enum, Float
from traitsui.api import View, Group, Item, CheckListEditor, RangeEditor, EnumEditor
import sys
import time
use_opengl = "opengl" in sys.argv
use_mayavi = "mayavi" in sys.argv
if not use_opengl and not use_mayavi:
use_mayavi = True
assert use_opengl != use_mayavi
if use_mayavi:
from espressomd.visualization_mayavi import mlab
if use_opengl:
from pyface.api import GUI
try:
import midi
except:
try:
from pygame import midi
except:
from portmidi import midi
midi.init()
# if log flag is set, midi controller will change pressure logarithmically
pressure_log_flag = True
mayavi_autozoom = False # autozoom is buggy... works only for rotation
old_pressure = -1
# NPT variables
#############################################################
NPTGamma0 = 1.0
#NPTInitPistonMass = 1e-06
#NPTMinPistonMass = 1e-06
NPTMinPistonMass = 1e-04
NPTMaxPistonMass = 1.0
NPTInitPistonMass = NPTMinPistonMass
# System parameters
#############################################################
# 300 Particles
box_l = 7.5395
density = 0.7
#global_boxlen = box_l
#mainthread_boxlen = box_l
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 2.5 * lj_sig
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
system.time_step = 0.01
system.cell_system.skin = 0.4
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
system.cell_system.set_n_square(use_verlet_lists=False)
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1
int_n_times = 5000000
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.force_cap = lj_cap
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
system.analysis.dist_to(0)
act_min_dist = system.analysis.min_dist()
system.cell_system.max_num_cells = 2744
if use_mayavi:
vis = visualization.mayaviLive(system)
elif use_opengl:
vis = visualization.openGLLive(system)
mayavi_rotation_angle = 45.
mayavi_rotation_angle_step = 5.
mayavi_zoom = 36.
mayavi_zoom_old = mayavi_zoom
mayavi_zoom_step = 3.
plot_max_data_len = 20
#############################################################
# GUI Controls #
#############################################################
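# Enumerate the attached MIDI devices and collect (index, name) pairs for inputs and outputs.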
inputs, outputs = [], []
for i in range(midi.get_count()):
interf, name, input, output, opened = midi.get_device_info(i)
if input:
inputs.append((i, interf + " " + name))
if output:
outputs.append((i, interf + " " + name))
class Controls(HasTraits):
if len(inputs) == 1:
default_input = inputs
for i in inputs:
if not "Through Port" in i[1]:
default_input = i
break
default_input = default_input if inputs else None
default_output = -1
through_port_output = None
for i in outputs:
if not "Through Port" in i[1]:
default_output = i
break
else:
through_port_output = i
default_output = default_output if len(
outputs) > 1 else through_port_output
if default_input is None or default_output is None:
print('Cannot connect to any MIDI device')
input_device = List(value=default_input,
editor=CheckListEditor(values=inputs))
output_device = List(value=default_output,
editor=CheckListEditor(values=outputs))
max_temp = 2.
min_temp = 0.5
max_press = 10.
min_press = 5e-4
max_vol = 100000.
min_vol = 50.
max_n = 1000
min_n = 50
temperature = Range(min_temp, max_temp, 1., )
volume = Float(box_l**3.)
pressure = Float(1.)
number_of_particles = Range(min_n, max_n, n_part, )
ensemble = Enum('NVT', 'NPT')
midi_input = None
midi_output = None
MIDI_BASE = 224
MIDI_NUM_TEMPERATURE = MIDI_BASE + 0
MIDI_NUM_VOLUME = MIDI_BASE + 1
MIDI_NUM_PRESSURE = MIDI_BASE + 2
MIDI_NUM_NUMBEROFPARTICLES = MIDI_BASE + 3
MIDI_ROTATE = 0
MIDI_ZOOM = 144
_ui = Any
view = View(
Group(
Item('temperature', editor=RangeEditor(
low_name='min_temp', high_name='max_temp')),
Item('volume', editor=RangeEditor(
low_name='min_vol', high_name='max_vol')),
Item('pressure', editor=RangeEditor(
low_name='min_press', high_name='max_press')),
Item('number_of_particles', editor=RangeEditor(
low_name='min_n', high_name='max_n', is_float=False)),
Item('ensemble', style='custom'),
show_labels=True,
label='Parameters'
),
Group(
Item('input_device'),
Item('output_device'),
show_labels=True,
label='MIDI devices'
),
buttons=[],
title='Control',
height=0.2,
width=0.3
)
def __init__(self, **traits):
super(Controls, self).__init__(**traits)
self._ui = self.edit_traits()
self.push_current_values()
def push_current_values(self):
"""send the current values to the MIDI controller"""
self._temperature_fired()
self._volume_fired()
self._pressure_fired()
self._number_of_particles_fired()
self._ensemble_fired()
def _input_device_fired(self):
if self.midi_input is not None:
self.midi_input.close()
if self.input_device:
self.midi_input = midi.Input(self.input_device[0])
def _output_device_fired(self):
if self.midi_output is not None:
self.midi_output.close()
self.midi_output = midi.Output(self.output_device[0])
self.push_current_values()
def _temperature_fired(self):
status = self.MIDI_NUM_TEMPERATURE
data1 = int((self.temperature - self.min_temp) /
(self.max_temp - self.min_temp) * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _volume_fired(self):
status = self.MIDI_NUM_VOLUME
data1 = limit_range(int((system.box_l[0]**3. - self.min_vol) /
(self.max_vol - self.min_vol) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _pressure_fired(self):
status = self.MIDI_NUM_PRESSURE
if pressure_log_flag:
data1 = limit_range(int(127 * (np.log(self.pressure) - np.log(self.min_press)) / (
np.log(self.max_press) - np.log(self.min_press))), minval=0, maxval=127)
else:
data1 = limit_range(int((self.pressure - self.min_press) /
(self.max_press - self.min_press) * 127), minval=0, maxval=127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _number_of_particles_fired(self):
status = self.MIDI_NUM_NUMBEROFPARTICLES
data1 = int(self.number_of_particles / self.max_n * 127)
data2 = data1
if self.midi_output is not None:
self.midi_output.write_short(status, data1, data2)
def _ensemble_fired(self):
if self.midi_output is not None:
self.midi_output.write_short(144, 0, 127) # T
self.midi_output.write_short(
144, 1, 127 * (self.ensemble != 'NPT')) # V
self.midi_output.write_short(
144, 2, 127 * (self.ensemble == 'NPT')) # P
self.midi_output.write_short(144, 3, 127) # N
#############################################################
# Integration #
#############################################################
# get initial observables
pressure = system.analysis.pressure()
temperature = 0.0
# TODO: this is some terrible polynomial fit, replace it with a better expression
# equation of state
pyplot.subplot(131)
pyplot.semilogy()
pyplot.title("Phase diagram")
pyplot.xlabel("Temperature")
pyplot.ylabel("Pressure")
pyplot.xlim(0.5, 2.0)
pyplot.ylim(5e-5, 2e1)
xx = np.linspace(0.5, 0.7, 200)
pyplot.plot(xx, -6.726 * xx**4 + 16.92 * xx**3 -
15.85 * xx**2 + 6.563 * xx - 1.015, 'k-')
xx = np.linspace(0.7, 1.3, 600)
pyplot.plot(xx, -0.5002 * xx**4 + 2.233 * xx**3 -
3.207 * xx**2 + 1.917 * xx - 0.4151, 'k-')
xx = np.linspace(0.6, 2.2, 1500)
pyplot.plot(xx, 16.72 * xx**4 - 88.28 * xx**3 +
168 * xx**2 - 122.4 * xx + 29.79, 'k-')
cursor = pyplot.scatter(temperature, pressure['total'], 200, 'g')
#cursor2 = pyplot.scatter(-1, -1, 200, 'r')
pyplot.text(0.6, 10, 'solid')
pyplot.text(1, 1, 'liquid')
pyplot.text(1, 10**-3, 'gas')
pyplot.subplot(132)
pyplot.title("Temperature")
plot1, = pyplot.plot([0], [temperature])
pyplot.xlabel("Time")
pyplot.ylabel("Temperature")
pyplot.subplot(133)
pyplot.title("Pressure")
plot2, = pyplot.plot([0], [pressure['total']])
pyplot.xlabel("Time")
pyplot.ylabel("Pressure")
# pyplot.legend()
pyplot.show(block=False)
plt1_x_data = np.zeros(1)
plt1_y_data = np.zeros(1)
plt2_x_data = np.zeros(1)
plt2_y_data = np.zeros(1)
def limit_range(val, minval=0., maxval=1.):
if val > maxval:
ret_val = maxval
elif val < minval:
ret_val = minval
else:
ret_val = val
if isinstance(val, int):
return int(ret_val)
elif isinstance(val, float):
return float(ret_val)
else:
return ret_val
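# Map a 0-127 MIDI value onto [pmin, pmax]; with log_flag set the interpolation is geometric, so equal fader steps scale the pressure by a constant factor.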
def pressure_from_midi_val(midi_val, pmin, pmax, log_flag=pressure_log_flag):
if log_flag:
return pmin * (float(pmax) / pmin)**(float(midi_val) / 127)
else:
return (midi_val * (pmax - pmin) / 127 + pmin)
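# One simulation step: integrate, refresh the visualizer, ramp the LJ force cap during warmup, apply the GUI/MIDI parameters (thermostat, NPT barostat or NVT, box rescaling, particle count) and buffer data for the live plots.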
def main_loop():
global energies, plt1_x_data, plt1_y_data, plt2_x_data, plt2_y_data, old_pressure
system.integrator.run(steps=int_steps)
vis.update()
# increase LJ cap during warmup
if system.force_cap > 0:
if system.analysis.min_dist() < min_dist:
system.force_cap = system.force_cap + 0.1
else:
system.force_cap = 0
print("Switching off force capping")
# make sure the parameters are valid
# not sure if this is necessary after using limit_range
if controls.volume == 0:
controls.volume = controls.min_vol
if controls.number_of_particles == 0:
controls.number_of_particles = 1
if controls.pressure == 0:
controls.pressure = controls.min_press
pressure = system.analysis.pressure()
# update the parameters set in the GUI
if system.thermostat.get_state()[0]['kT'] != controls.temperature:
system.thermostat.set_langevin(kT=controls.temperature, gamma=1.0)
print("temperature changed")
system.force_cap = lj_cap
if controls.ensemble == 'NPT':
# reset Vkappa when target pressure has changed
if old_pressure != controls.pressure:
system.analysis.v_kappa('reset')
print("pressure changed")
old_pressure = controls.pressure
system.force_cap = lj_cap
newVkappa = system.analysis.v_kappa('read')['Vk1']
newVkappa = newVkappa if newVkappa > 0. else 4.0 / \
(NPTGamma0 * NPTGamma0 * NPTInitPistonMass)
pistonMass = limit_range(4.0 / (NPTGamma0 * NPTGamma0 * newVkappa),
NPTMinPistonMass, NPTMaxPistonMass)
system.integrator.set_isotropic_npt(
controls.pressure, pistonMass, cubic_box=True)
controls.volume = system.box_l[0]**3.
else:
system.integrator.set_nvt()
controls.pressure = pressure['total']
new_box = np.ones(3) * controls.volume**(1. / 3.)
if np.any(np.array(system.box_l) != new_box):
for i in range(len(system.part)):
system.part[i].pos = system.part[i].pos * \
new_box / system.box_l[0]
print("volume changed")
system.force_cap = lj_cap
system.box_l = new_box
new_part = controls.number_of_particles
if new_part > len(system.part):
for i in range(len(system.part), new_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
print("particles added")
system.force_cap = lj_cap
elif new_part < len(system.part):
for i in range(new_part, len(system.part)):
system.part[i].remove()
print("particles removed")
plt1_x_data = plot1.get_xdata()
plt1_y_data = plot1.get_ydata()
plt2_x_data = plot2.get_xdata()
plt2_y_data = plot2.get_ydata()
plt1_x_data = np.append(
plt1_x_data[-plot_max_data_len + 1:], system.time)
plt1_y_data = np.append(plt1_y_data[-plot_max_data_len + 1:],
2. / (3. * len(system.part))
* system.analysis.energy()["kinetic"])
plt2_x_data = np.append(
plt2_x_data[-plot_max_data_len + 1:], system.time)
plt2_y_data = np.append(
plt2_y_data[-plot_max_data_len + 1:], pressure['total'])
def main_thread():
for i in range(int_n_times):
main_loop()
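# Background loop: poll the MIDI input and translate controller events into temperature, volume, pressure, particle-number and camera rotation/zoom changes.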
def midi_thread():
global mayavi_rotation_angle, mayavi_zoom
while True:
try:
if controls.midi_input is not None and controls.midi_input.poll():
events = controls.midi_input.read(1000)
for event in events:
status, data1, data2, data3 = event[0]
if status == controls.MIDI_NUM_TEMPERATURE:
temperature = data2 * \
(controls.max_temp - controls.min_temp) / \
127 + controls.min_temp
controls.temperature = limit_range(
temperature, controls.min_temp, controls.max_temp)
elif status == controls.MIDI_NUM_VOLUME:
volume = data2 * \
(controls.max_vol - controls.min_vol) / \
127 + controls.min_vol
controls.volume = limit_range(
volume, controls.min_vol, controls.max_vol)
controls.ensemble = 'NVT'
elif status == controls.MIDI_NUM_PRESSURE:
pressure = pressure_from_midi_val(
data2, controls.min_press, controls.max_press)
controls.pressure = limit_range(
pressure, controls.min_press, controls.max_press)
controls.ensemble = 'NPT'
elif status == controls.MIDI_NUM_NUMBEROFPARTICLES:
npart = int(data2 * controls.max_n / 127)
controls.number_of_particles = limit_range(
npart, controls.min_n, controls.max_n)
elif status == controls.MIDI_ROTATE:
if data2 < 65:
# rotate clockwise
mayavi_rotation_angle += mayavi_rotation_angle_step * \
data2
elif data2 >= 65:
# rotate counterclockwise
mayavi_rotation_angle -= mayavi_rotation_angle_step * \
(data2 - 64)
elif status == controls.MIDI_ZOOM:
if data1 == 99 and data2 == 127:
# zoom in
mayavi_zoom -= mayavi_zoom_step
elif data1 == 98 and data2 == 127:
# zoom out
mayavi_zoom += mayavi_zoom_step
# else:
# print("Unknown Status {0} with data1={1} and
# data2={2}".format(status, data1, data2))
except Exception as e:
print(e)
time.sleep(0.01)
last_plotted = 0
def rotate_scene():
global mayavi_rotation_angle
if use_mayavi and mayavi_rotation_angle:
# mlab.yaw(mayavi_rotation_angle)
if mayavi_autozoom:
mlab.view(azimuth=mayavi_rotation_angle, distance='auto')
else:
current_view_vals = mlab.view()
mlab.view(azimuth=mayavi_rotation_angle,
elevation=current_view_vals[1],
distance=current_view_vals[2],
focalpoint=current_view_vals[3])
mayavi_rotation_angle %= 360.
def zoom_scene():
global mayavi_zoom, mayavi_zoom_old
if use_mayavi:
mlab.view(distance=mayavi_zoom)
elif use_opengl:
if mayavi_zoom_old < mayavi_zoom:
vis.camera.move_backward()
mayavi_zoom_old = mayavi_zoom
elif mayavi_zoom_old > mayavi_zoom:
vis.camera.move_forward()
help(vis.camera.move_forward)
mayavi_zoom_old = mayavi_zoom
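# Visualizer callback: apply the zoom, push the buffered temperature/pressure history into the two line plots, move the phase-diagram cursor and rescale the axes.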
def update_plot():
global last_plotted
# rotate_scene()
zoom_scene()
data_len = np.array([len(plt1_x_data), len(plt1_y_data),
len(plt2_x_data), len(plt2_y_data)]).min()
plot1.set_xdata(plt1_x_data[:data_len])
plot1.set_ydata(plt1_y_data[:data_len])
plot2.set_xdata(plt2_x_data[:data_len])
plot2.set_ydata(plt2_y_data[:data_len])
cursor.set_offsets([plt1_y_data[data_len - 1], plt2_y_data[data_len - 1]])
# cursor2.set_offsets([controls.temperature, controls.pressure])
current_time = plot1.get_xdata()[-1]
if last_plotted == current_time:
return
last_plotted = current_time
plot1.axes.set_xlim(plot1.get_xdata()[0], plot1.get_xdata()[-1])
plot1.axes.set_ylim(0.8 * plot1.get_ydata().min(),
1.2 * plot1.get_ydata().max())
plot2.axes.set_xlim(plot2.get_xdata()[0], plot2.get_xdata()[-1])
plot2.axes.set_ylim(0.8 * plot2.get_ydata().min(),
1.2 * plot2.get_ydata().max())
pyplot.draw()
t = Thread(target=main_thread)
t.daemon = True
vis.register_callback(update_plot, interval=1000)
controls = Controls()
t.start()
if controls.midi_input is not None:
t2 = Thread(target=midi_thread)
t2.daemon = True
t2.start()
if use_opengl:
gui = GUI()
vis.register_callback(gui.process_events, interval=1000)
vis.start()
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/deap-master/doc/conf.py | 1 | 8197 | # -*- coding: utf-8 -*-
#
# DEAP documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 30 13:21:43 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import time
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append("..")
# sys.path.append(os.path.abspath('_ext/'))
import deap
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
'sphinx.ext.pngmath', 'sphinx.ext.intersphinx', 'sphinx.ext.extlinks',
'sphinx.ext.viewcode']
try:
import matplotlib
except:
pass
else:
extensions += ['matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DEAP'
copyright = u'2009-%s, DEAP Project' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = deap.__version__
# The full version, including alpha/beta/rc tags.
release = deap.__revision__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, the todo will be printed in the documentation
todo_include_todos = True
# Search in python documentation
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None)}
# Reload the cached values every 5 days
intersphinx_cache_limit = 5
# -- Options for pyplot extension ----------------------------------------------
# Default value for the include-source option
plot_include_source = False
# Code that should be executed before each plot.
# plot_pre_code
# Base directory, to which ``plot::`` file names are relative
# to. (If None or empty, file names are relative to the
# directory where the file containing the directive is.)
# plot_basedir
# Whether to show links to the files in HTML.
plot_html_show_formats = True
# -- Options for extlinks extension ----------------------------------------------
import subprocess
try:
tree = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip()
except OSError:
import warnings
warnings.warn("Cannot link examples because we cannot retrieve the git version", Warning)
else:
extlinks = {'example': ('https://github.com/DEAP/deap/blob/{tree}/examples/%s.py'.format(tree=tree), "examples/")}
# -- Options for HTML output ---------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'classic' and 'alabaster'.
html_theme = 'pydoctheme'
# RTFD.org does not support sphinx 1.3.1 yet.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {'collapsiblesidebar': True}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "deap_orange_icon_32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'DEAP-doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'DEAP.tex', u'DEAP Documentation',
u'DEAP Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amsmath,amssymb}'
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| mit |
changshuaiwei/Udc-ML | smartcab/smartcab/agent.py | 1 | 28575 | import random
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
from collections import OrderedDict
import numpy as np
import pandas as pd
class LearningAgent_v2(Agent):
"""An agent that learns to drive in the smartcab world."""
def __init__(self, env):
super(LearningAgent_v2, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
self.color = 'red' # override color
self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint
# TODO: Initialize any additional variables here
self.trip_history = []
self.debug = True
self.gamma = 0.2 #upper bound of discount
#self.alpha = 0.5 #upper bound of learning rate
self.epsilon = 0.1 #lower bound of proportion of random steps
self.reg = 0.001 # regularization param for regression
self.lr = 0.1 # learning rate for regression
self.clock_update = 0 # store number of updates
self.init_params_scale = 1e-4 # scale of initial params setting
self.max_memory = 400 # number of rows for state_action_experience
self.batch_size = 20 # size of batch
self.batch_step = 20 # extract a batch for each batch_step steps
self.param_step = self.max_memory # how many step should update w
self.state_feature = ['right_no', 'forward_no', 'left_no', 'next_right', 'next_forward', 'next_left']
self.action_feature = ['right', 'forward', 'left']
self.state = None
self.num_action_space = np.concatenate( ( np.diag(np.ones(3)), np.zeros(3)[np.newaxis,:]))
self.state_action_feature = self.state_feature + self.action_feature + [x + "_action_" + y for x in self.state_feature for y in self.action_feature]
#self.state_action_df = pd.DataFrame(columns = (self.state_action_feature + ['Q_score']) )
self.state_action_experience = np.zeros( (1, len(self.state_action_feature)) )
self.Q_score_experience = np.zeros(1)
self.ex_state = np.zeros( len(self.state_feature) )
self.ex_state_action = np.zeros( len(self.state_action_feature) )
self.ex_reward = 0
self.params = {'b': np.random.randn(1), 'w': self.init_params_scale * np.random.randn(len(self.state_action_feature),1)}
self.params_update = self.params
self.reward_history = np.zeros(1)
def reset(self, destination=None):
self.planner.route_to(destination)
# TODO: Prepare for a new trip; reset any variables here, if required
self.ex_reward = 0
self.ex_state_action = np.zeros( len(self.state_action_feature) )
self.ex_state = np.zeros( len(self.state_feature) )
if(len(self.trip_history) <150 ) :
print 'Current success rate is {}'.format( sum(self.trip_history)/(len(self.trip_history) + 0.000001) )
else:
print 'Success rate for recent 100 trials is {}'.format(sum(self.trip_history[-100:])/(len(self.trip_history[-100:]) + 0.000001))
print 'Average reward for recent moves is {}'.format(np.mean(self.reward_history))
if(self.reward_history.shape[0] > 1000) :
self.reward_history = np.delete(self.reward_history, range(100))
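# Encode the sensed traffic inputs and the planner's next_waypoint as a binary feature vector over self.state_feature (which moves are disallowed, which direction is suggested).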
def numeric_state(self, inputs=None, deadline=0, next_waypoint=None):
#print 'inputs is {}, deadline is {}, next_waypoint is {}'.format(str(inputs), str(deadline), str(next_waypoint))
col_name = self.state_feature
state = np.zeros(len(col_name))
state += np.array( map(lambda x: x=='next_' + str(next_waypoint), col_name) )
if inputs['light'] == 'red' and inputs['left'] == 'forward':
#state += np.array( map(lambda x: x=='right_no', col_name) )
state[0] = 1
if inputs['light'] == 'red':
state[1] = 1
if inputs['light'] == 'red' or (inputs['oncoming'] == 'forward' or inputs['oncoming'] == 'right'):
state[2] = 1
#state[len(col_name)-1] = deadline
if False:
print 'inputs is {}, deadline is {}, next_waypoint is {}\n'.format(str(inputs), str(deadline), str(next_waypoint))
print zip(col_name,state)
raw_input("Press Enter to continue...")
return state
def numeric_action(self, action=None):
col_name = self.action_feature
return np.array( map(lambda x: x==str(action), col_name) )
def numeric_state_action(self, num_state=None , num_action=None ):
return np.concatenate( (num_state, num_action, np.outer(num_state,num_action).flatten() ), axis = 0)
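# Score all candidate actions (right, forward, left, None) with the linear model w.x + b and return the best action, its Q estimate and its state-action feature vector.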
def max_Q_param(self, num_state):
X = np.apply_along_axis(lambda x: self.numeric_state_action(num_state, x), axis = 1, arr=self.num_action_space)
score = X.dot(self.params['w']) + self.params['b']
if False:
print '\nX are\n {}\n, Params are\n {}\n'.format(str(X), str(self.params))
raw_input("Press Enter to continue...")
choose = np.argmax(score)
opt_action = None
if choose<3:
opt_action = self.action_feature[choose]
num_state_action = X[choose]
max_Q_hat = score[choose]
if False:
print '\nScores are\n {}\n, opt action are\n {}\n'.format(str(score), str(opt_action))
raw_input("Press Enter to continue...")
return opt_action, max_Q_hat, num_state_action
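# Mean-squared-error loss with L2 regularization on w; returns the analytic gradients d_w, d_b and the loss for the mini-batch (X, y).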
def gradient(self, X, y, reg=0.01):
if False:
print '\nX are\n {}\n and y are\n {}\n'.format(str(X), str(y))
raw_input("Press Enter to continue...")
w, b = self.params_update['w'], self.params_update['b']
scores = X.dot(w) + b
y = y.flatten()[:,np.newaxis]
loss = np.mean((y-scores)**2) + 0.5 * reg * np.sum(w**2)
if False:
print '\ny are\n {}\n and scores are\n {}\n and loss is\n {}\n'.format(str(y), str(scores), str(loss) )
raw_input("Press Enter to continue...")
d_w = np.mean((X*((scores-y)*2)),axis=0)[:,np.newaxis] + reg * w
d_b = np.mean((scores-y)*2)
return d_w, d_b, loss
def sample_X_y(self, size=10):
idx = np.random.randint(self.state_action_experience.shape[0],size=size)
X = self.state_action_experience[idx ,:]
y = self.Q_score_experience[idx]
return X, y
def update(self, t):
# Gather inputs
self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
inputs = self.env.sense(self)
deadline = self.env.get_deadline(self)
#may need to take deadline into account?
# TODO: Update state
self.state_action_experience = np.concatenate( (self.state_action_experience , self.ex_state_action[np.newaxis,:]) )
num_state = self.numeric_state(inputs=inputs, deadline=deadline, next_waypoint=self.next_waypoint)
self.state = zip(self.state_feature, num_state)
# TODO: Select action according to your policy
action, max_Q_hat, num_state_action = self.max_Q_param(num_state)
if(random.uniform(0,1) < self.epsilon):
action = random.choice(Environment.valid_actions[:])
num_action = self.numeric_action(action)
num_state_action = self.numeric_state_action(num_state=num_state, num_action=num_action)
if False:
print "\n Use a random action, {}".format(str(action) )
#debug
raw_input("Press Enter to continue...")
true_Q_score = self.ex_reward + self.gamma * max_Q_hat
self.Q_score_experience = np.append(self.Q_score_experience, true_Q_score)
self.clock_update += 1
if False:
print '\nShape of State Action expreience Matrix is {}\n'.format(self.state_action_experience.shape)
print '\nShape of Q score experience is {}\n'.format(self.Q_score_experience.shape)
raw_input("Press Enter to continue...")
# TODO: Learn policy based on state, action, reward
reward = self.env.act(self, action)
self.ex_reward = reward
self.ex_state_action = num_state_action
self.reward_history = np.append(self.reward_history, reward)
if reward>9:
self.trip_history.append(1)
#need to write down something here
self.state_action_experience = np.concatenate( (self.state_action_experience , self.ex_state_action[np.newaxis,:]) )
self.Q_score_experience = np.append(self.Q_score_experience, reward)
self.clock_update += 1
elif deadline == 0:
self.trip_history.append(0)
self.state_action_experience = np.concatenate( (self.state_action_experience , self.ex_state_action[np.newaxis,:]) )
self.Q_score_experience = np.append(self.Q_score_experience, reward)
self.clock_update += 1
if(self.clock_update > self.max_memory + 2):
self.state_action_experience = np.delete(self.state_action_experience, range(self.state_action_experience.shape[0] - self.max_memory), 0 )
self.Q_score_experience = np.delete(self.Q_score_experience, range(len(self.Q_score_experience) - self.max_memory ) )
if False:
print '\nShape of State Action expreience Matrix is {}\n'.format(self.state_action_experience.shape)
print '\nShape of Q score experience is {}\n'.format(self.Q_score_experience.shape)
raw_input("Press Enter to continue...")
if(self.clock_update % self.batch_step == 0 ):
for i in xrange(2):
if False:
print '\nUpdated Parameters are {}\n'.format(str(self.params_update))
raw_input("Press Enter to continue...")
data_X, data_y = self.sample_X_y( size = self.batch_size )
d_w, d_b, loss = self.gradient(data_X, data_y, reg=self.reg)
if False:
print '\nGradiants are {} and {}\n'.format(str(d_w),str(d_b))
raw_input("Press Enter to continue...")
if False:
print '\nloss is {}\n'.format(loss)
raw_input("Press Enter to continue...")
self.params_update['w'] = self.params_update['w'] - self.lr * d_w
self.params_update['b'] = self.params_update['b'] - self.lr * d_b
if self.clock_update % self.param_step == 0:
self.params = self.params_update
if True:
print '\nBias for regression is {}\n'.format(str(self.params['b']))
weight_df = pd.DataFrame(data=self.params['w'].T, columns = self.state_action_feature)
print '\nWeights for regression is\n{}\n'.format(weight_df.T)
#raw_input("Press Enter to continue...")
#print "LearningAgent.update(): deadline = {}, inputs = {}, action = {}, reward = {}".format(deadline, inputs, action, reward) # [debug]
class LearningAgent_v1(Agent):
"""An agent that learns to drive in the smartcab world."""
def __init__(self, env):
super(LearningAgent_v1, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
self.color = 'red' # override color
self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint
# TODO: Initialize any additional variables here
self.gamma = 0.2 #upper bound of discount
self.alpha = 0.1 #upper bound of learning rate
self.epsilon = 0.2 #lower bound of proportion of random steps
#self.state = {'deadline': None, 'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.state = {'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exreward = 0
#self.exstate = {'deadline': None, 'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exstate = {'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exaction = None
self.debug = False
self.trip_history = []
self.Q = OrderedDict()
self.reward_history = np.zeros(1)
def reset(self, destination=None):
self.planner.route_to(destination)
# TODO: Prepare for a new trip; reset any variables here, if required
self.state = {'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exreward = 0
self.exstate = {'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exaction = None
if(len(self.trip_history) <150 ) :
print 'Current success rate is {}'.format( sum(self.trip_history)/(len(self.trip_history) + 0.000001) )
else:
print 'Success rate for recent 100 trials is {}'.format(sum(self.trip_history[-100:])/(len(self.trip_history[-100:]) + 0.000001))
print 'Average reward for recent moves is {}'.format(np.mean(self.reward_history))
if(self.reward_history.shape[0] > 1000) :
self.reward_history = np.delete(self.reward_history, range(100))
#print str(self.Q)
def update(self, t):
# Gather inputs
self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
inputs = self.env.sense(self)
deadline = self.env.get_deadline(self)
#may need to take deadline into account?
# TODO: Update state
#self.state = {'inputs': inputs, 'deadline': deadline, 'next_waypoint':self.next_waypoint}
#self.state = {'inputs': inputs, 'next_waypoint':self.next_waypoint}
#epsilon = self.epsilon + (1-self.epsilon)/(t+1)*5
#gamma = ( 1- 10/(t+10) ) * self.gamma
#alpha = self.alpha/(t+1.0)
gamma = self.gamma
epsilon = self.epsilon
alpha = self.alpha
self.state['next_waypoint'] = self.next_waypoint
#self.state['deadline'] = int(deadline>5) + int(deadline>25)
self.state['right_ok'] = True
if inputs['light'] == 'red' and inputs['left'] == 'forward':
self.state['right_ok'] = False
if inputs['light'] == 'red':
self.state['forward_ok']=False
else:
self.state['forward_ok']=True
if inputs['light'] == 'red' or (inputs['oncoming'] == 'forward' or inputs['oncoming'] == 'right'):
self.state['left_ok']=False
else:
self.state['left_ok']=True
# TODO: Select action according to your policy
#action = random.choice(Environment.valid_actions[1:])
newkey = str(self.exstate.values()) + ':' + str(self.exaction)
if(self.debug):
print "\n New key is {}".format(newkey)
#debug
raw_input("Press Enter to continue...")
tmp_Q = dict([ (x, self.Q[x]) for x in self.Q.keys() if str(self.state.values()) in x])
#print tmp_Q
if self.debug:
print "\n Q value for future state is {}".format(str(tmp_Q))
#debug
raw_input("Press Enter to continue...")
action = random.choice(Environment.valid_actions[:])
tmp_max_Q = 0
if( len(tmp_Q) == 0 ):
tmp_max_Q = 0
action = random.choice(Environment.valid_actions[:])
else:
#tmp_idx = max(tmp_Q)
tmp_idx = max(tmp_Q.iterkeys(), key=(lambda key: tmp_Q[key]))
tmp_max_Q = tmp_Q[tmp_idx]
if( tmp_max_Q>0 or len(tmp_Q)==4 ):
#print tmp_idx
tmp_Q_split = tmp_idx.split(':')
#print tmp_Q_split
#print tmp_Q_split
action = tmp_Q_split[1]
if action=='None' :
action = None
else:
exist_actions = [x.split(':')[1] for x in tmp_Q.keys() ]
all_actions = ['None', 'forward', 'left', 'right']
remaining_actions = [x for x in all_actions if not (x in exist_actions)]
if self.debug:
print "Remaining actions are {}".format(str(remaining_actions))
action = random.choice(remaining_actions)
tmp_max_Q = 0
if action=='None' :
action = None
if self.debug:
print "\n future optimum action is {}".format(str(action))
#debug
raw_input("Press Enter to continue...")
if(random.uniform(0,1) < epsilon):
action = random.choice(Environment.valid_actions[:])
if self.debug:
print "\n Instead use a random action, {}".format(str(action) )
#debug
raw_input("Press Enter to continue...")
#print 'now ' + str(action)
#random guessing has a success rate of about 0.20
#action = random.choice(Environment.valid_actions[:])
newval = self.exreward + gamma * tmp_max_Q
if self.debug:
print "\n current reward is {0}, gamma is {1}, and estimated max future Q is {2}".format(self.exreward, gamma, tmp_max_Q)
#debug
raw_input("Press Enter to continue...")
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
if self.debug:
print "updated Q values {}".format(str(self.Q))
#debug
raw_input("Press Enter to continue...")
#print t
# Execute action and get reward
reward = self.env.act(self, action)
if reward>9:
self.trip_history.append(1)
#need to write down something here
newkey = str(self.state.values()) + ':' + str(action)
newval = reward # + deadline
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
elif deadline == 0:
self.trip_history.append(0)
newkey = str(self.state.values()) + ':' + str(action)
newval = reward
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
# TODO: Learn policy based on state, action, reward
self.exreward = reward
self.exstate = self.state
self.exaction = action
self.reward_history = np.append(self.reward_history, reward)
#print "number of parameter is {0}, sum of Qfunction is {1}".format( len(self.Q.keys()), sum(self.Q.values()) )
#print "LearningAgent.update(): deadline = {}, inputs = {}, action = {}, reward = {}".format(deadline, inputs, action, reward) # [debug]
class LearningAgent_v0(Agent):
"""An agent that learns to drive in the smartcab world."""
def __init__(self, env):
super(LearningAgent_v0, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
self.color = 'red' # override color
self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint
# TODO: Initialize any additional variables here
self.gamma = 0.2 #upper bound of discount
self.alpha = 0.1 #upper bound of learning rate
self.epsilon = 0.2 #lower bound of proportion of random steps
#self.state = {'deadline': None, 'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.state = {'light': 'green', 'oncoming': None, 'left': None, 'right': None, 'next_waypoint': None }
self.exreward = 0
#self.exstate = {'deadline': None, 'forward_ok': True, 'left_ok': True, 'right_ok': True, 'next_waypoint': None }
self.exstate = {'light': 'green', 'oncoming': None, 'left': None, 'right': None, 'next_waypoint': None }
self.exaction = None
self.debug = False
self.trip_history = []
self.reward_history = np.zeros(1)
self.Q = OrderedDict()
def reset(self, destination=None):
self.planner.route_to(destination)
# TODO: Prepare for a new trip; reset any variables here, if required
self.state = {'light': 'green', 'oncoming': None, 'left': None, 'right': None, 'next_waypoint': None }
self.exreward = 0
self.exstate = {'light': 'green', 'oncoming': None, 'left': None, 'right': None, 'next_waypoint': None }
self.exaction = None
if(len(self.trip_history) <150 ) :
print 'Current success rate is {}'.format( sum(self.trip_history)/(len(self.trip_history) + 0.000001) )
else:
print 'Success rate for recent 100 trials is {}'.format(sum(self.trip_history[-100:])/(len(self.trip_history[-100:]) + 0.000001))
print 'Average reward for recent moves is {}'.format(np.mean(self.reward_history))
if(self.reward_history.shape[0] > 1000) :
self.reward_history = np.delete(self.reward_history, range(100))
#print "number of parameter is {0}, sum of Qfunction is {1}".format( len(self.Q.keys()), sum(self.Q.values()) )
def update(self, t):
# Gather inputs
self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
inputs = self.env.sense(self)
deadline = self.env.get_deadline(self)
#may need to take deadline into account?
# TODO: Update state
#self.state = {'inputs': inputs, 'deadline': deadline, 'next_waypoint':self.next_waypoint}
#self.state = {'inputs': inputs, 'next_waypoint':self.next_waypoint}
#epsilon = self.epsilon + (1-self.epsilon)/(t+1)*5
#gamma = ( 1- 10/(t+10) ) * self.gamma
#alpha = self.alpha/(t+1.0)
gamma = self.gamma
epsilon = self.epsilon
alpha = self.alpha
self.state['next_waypoint'] = self.next_waypoint
#self.state['deadline'] = int(deadline>5) + int(deadline>25)
for k in inputs.keys():
self.state[k] = inputs[k]
# TODO: Select action according to your policy
#action = random.choice(Environment.valid_actions[1:])
newkey = str(self.exstate.values()) + ':' + str(self.exaction)
if(self.debug):
print "\n New key is {}".format(newkey)
#debug
raw_input("Press Enter to continue...")
tmp_Q = dict([ (x, self.Q[x]) for x in self.Q.keys() if str(self.state.values()) in x])
#print tmp_Q
if self.debug:
print "\n Q value for future state is {}".format(str(tmp_Q))
#debug
raw_input("Press Enter to continue...")
action = random.choice(Environment.valid_actions[:])
tmp_max_Q = 0
if( len(tmp_Q) == 0 ):
tmp_max_Q = 0
action = random.choice(Environment.valid_actions[:])
else:
#tmp_idx = max(tmp_Q)
tmp_idx = max(tmp_Q.iterkeys(), key=(lambda key: tmp_Q[key]))
tmp_max_Q = tmp_Q[tmp_idx]
if( tmp_max_Q>0 or len(tmp_Q)==4 ):
#print tmp_idx
tmp_Q_split = tmp_idx.split(':')
#print tmp_Q_split
#print tmp_Q_split
action = tmp_Q_split[1]
if action=='None' :
action = None
else:
exist_actions = [x.split(':')[1] for x in tmp_Q.keys() ]
all_actions = ['None', 'forward', 'left', 'right']
remaining_actions = [x for x in all_actions if not (x in exist_actions)]
if self.debug:
print "Remaining actions are {}".format(str(remaining_actions))
action = random.choice(remaining_actions)
tmp_max_Q = 0
if action=='None' :
action = None
if self.debug:
print "\n future optimum action is {}".format(str(action))
#debug
raw_input("Press Enter to continue...")
if(random.uniform(0,1) < epsilon):
action = random.choice(Environment.valid_actions[:])
if self.debug:
print "\n Instead use a random action, {}".format(str(action) )
#debug
raw_input("Press Enter to continue...")
#print 'now ' + str(action)
#random guessing has a success rate of about 0.20
#action = random.choice(Environment.valid_actions[:])
newval = self.exreward + gamma * tmp_max_Q
if self.debug:
print "\n current reward is {0}, gamma is {1}, and estimated max future Q is {2}".format(self.exreward, gamma, tmp_max_Q)
#debug
raw_input("Press Enter to continue...")
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
if self.debug:
print "updated Q values {}".format(str(self.Q))
#debug
raw_input("Press Enter to continue...")
#print t
# Execute action and get reward
reward = self.env.act(self, action)
if reward>9:
self.trip_history.append(1)
#need to write down something here
newkey = str(self.state.values()) + ':' + str(action)
newval = reward # + deadline
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
elif deadline == 0:
self.trip_history.append(0)
newkey = str(self.state.values()) + ':' + str(action)
newval = reward
if newkey in self.Q.keys():
self.Q[newkey] = self.Q[newkey] * (1-alpha) + alpha * newval
else:
self.Q[newkey] = self.alpha * newval
# TODO: Learn policy based on state, action, reward
self.exreward = reward
self.exstate = self.state
self.exaction = action
self.reward_history = np.append(self.reward_history, reward)
#print "number of parameter is {0}, sum of Qfunction is {1}".format( len(self.Q.keys()), sum(self.Q.values()) )
#print "LearningAgent.update(): deadline = {}, inputs = {}, action = {}, reward = {}".format(deadline, inputs, action, reward) # [debug]
def run():
"""Run the agent for a finite number of trials."""
# Set up environment and agent
e = Environment() # create environment (also adds some dummy traffic)
a = e.create_agent(LearningAgent_v2) # create agent
e.set_primary_agent(a, enforce_deadline=True) # specify agent to track
# NOTE: You can set enforce_deadline=False while debugging to allow longer trials
# Now simulate it
sim = Simulator(e, update_delay=0.0001, display=False) # create simulator (uses pygame when display=True, if available)
# NOTE: To speed up simulation, reduce update_delay and/or set display=False
sim.run(n_trials=500) # run for a specified number of trials
# NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line
if __name__ == '__main__':
run()
| gpl-3.0 |
ibis-project/ibis | ibis/backends/impala/tests/test_partition.py | 1 | 8063 | from posixpath import join as pjoin
import pandas as pd
import pandas.testing as tm
import pytest
import ibis
import ibis.util as util
from ibis.backends.impala.compat import ImpylaError
from ibis.tests.util import assert_equal
pytestmark = pytest.mark.impala
@pytest.fixture
def df():
df = pd.DataFrame(
{
'year': [2009] * 3 + [2010] * 3,
'month': list(map(str, [1, 2, 3] * 2)),
'value': list(range(1, 7)),
},
index=list(range(6)),
)
df = pd.concat([df] * 10, ignore_index=True)
df['id'] = df.index.values
return df
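# Fixture: load df into a uniquely named temporary table and yield the corresponding table expression; the table is dropped on teardown.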
@pytest.fixture
def unpart_t(con, df, tmp_db):
pd_name = '__ibis_test_partition_{}'.format(util.guid())
con.create_table(pd_name, df, database=tmp_db)
try:
yield con.table(pd_name, database=tmp_db)
finally:
assert con.exists_table(pd_name, database=tmp_db), pd_name
con.drop_table(pd_name, database=tmp_db)
def test_is_partitioned(con, temp_table):
schema = ibis.schema(
[('foo', 'string'), ('year', 'int32'), ('month', 'string')]
)
name = temp_table
con.create_table(name, schema=schema, partition=['year', 'month'])
assert con.table(name).is_partitioned
def test_create_table_with_partition_column(con, temp_table_db):
schema = ibis.schema(
[
('year', 'int32'),
('month', 'string'),
('day', 'int8'),
('value', 'double'),
]
)
tmp_db, name = temp_table_db
con.create_table(
name, schema=schema, database=tmp_db, partition=['year', 'month']
)
# the partition column get put at the end of the table
ex_schema = ibis.schema(
[
('day', 'int8'),
('value', 'double'),
('year', 'int32'),
('month', 'string'),
]
)
table_schema = con.get_schema(name, database=tmp_db)
assert_equal(table_schema, ex_schema)
partition_schema = con.database(tmp_db).table(name).partition_schema()
expected = ibis.schema([('year', 'int32'), ('month', 'string')])
assert_equal(partition_schema, expected)
def test_create_partitioned_separate_schema(con, temp_table):
schema = ibis.schema([('day', 'int8'), ('value', 'double')])
part_schema = ibis.schema([('year', 'int32'), ('month', 'string')])
name = temp_table
con.create_table(name, schema=schema, partition=part_schema)
# the partition column get put at the end of the table
ex_schema = ibis.schema(
[
('day', 'int8'),
('value', 'double'),
('year', 'int32'),
('month', 'string'),
]
)
table_schema = con.get_schema(name)
assert_equal(table_schema, ex_schema)
partition_schema = con.table(name).partition_schema()
assert_equal(partition_schema, part_schema)
def test_unpartitioned_table_get_schema(con):
tname = 'functional_alltypes'
with pytest.raises(ImpylaError):
con.table(tname).partition_schema()
def test_insert_select_partitioned_table(con, df, temp_table, unpart_t):
part_keys = ['year', 'month']
con.create_table(temp_table, schema=unpart_t.schema(), partition=part_keys)
part_t = con.table(temp_table)
unique_keys = df[part_keys].drop_duplicates()
for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
select_stmt = unpart_t[
(unpart_t.year == year) & (unpart_t.month == month)
]
# test both styles of insert
if i:
part = {'year': year, 'month': month}
else:
part = [year, month]
part_t.insert(select_stmt, partition=part)
verify_partitioned_table(part_t, df, unique_keys)
def test_create_partitioned_table_from_expr(con, alltypes):
t = alltypes
expr = t[t.id <= 10][['id', 'double_col', 'month', 'year']]
name = 'tmppart_{}'.format(util.guid())
try:
con.create_table(name, expr, partition=[t.year])
except Exception:
raise
else:
new = con.table(name)
expected = expr.execute().sort_values('id').reset_index(drop=True)
result = new.execute().sort_values('id').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
finally:
con.drop_table(name, force=True)
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_insert_overwrite_partition():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_dynamic_partitioning():
assert False
def test_add_drop_partition_no_location(con, temp_table):
schema = ibis.schema(
[('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
)
name = temp_table
con.create_table(name, schema=schema, partition=['year', 'month'])
table = con.table(name)
part = {'year': 2007, 'month': 4}
table.add_partition(part)
assert len(table.partitions()) == 2
table.drop_partition(part)
assert len(table.partitions()) == 1
def test_add_drop_partition_owned_by_impala(hdfs, con, temp_table):
schema = ibis.schema(
[('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
)
name = temp_table
con.create_table(name, schema=schema, partition=['year', 'month'])
table = con.table(name)
part = {'year': 2007, 'month': 4}
subdir = util.guid()
basename = util.guid()
path = '/tmp/{}/{}'.format(subdir, basename)
hdfs.mkdir('/tmp/{}'.format(subdir))
hdfs.chown('/tmp/{}'.format(subdir), owner='impala', group='supergroup')
table.add_partition(part, location=path)
assert len(table.partitions()) == 2
table.drop_partition(part)
assert len(table.partitions()) == 1
def test_add_drop_partition_hive_bug(con, temp_table):
schema = ibis.schema(
[('foo', 'string'), ('year', 'int32'), ('month', 'int16')]
)
name = temp_table
con.create_table(name, schema=schema, partition=['year', 'month'])
table = con.table(name)
part = {'year': 2007, 'month': 4}
path = '/tmp/{}'.format(util.guid())
table.add_partition(part, location=path)
assert len(table.partitions()) == 2
table.drop_partition(part)
assert len(table.partitions()) == 1
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_set_partition_location():
assert False
def test_load_data_partition(con, hdfs, tmp_dir, unpart_t, df, temp_table):
part_keys = ['year', 'month']
con.create_table(temp_table, schema=unpart_t.schema(), partition=part_keys)
part_t = con.table(temp_table)
# trim the runtime of this test
df = df[df.month == '1'].reset_index(drop=True)
unique_keys = df[part_keys].drop_duplicates()
hdfs_dir = pjoin(tmp_dir, 'load-data-partition')
df2 = df.drop(['year', 'month'], axis='columns')
csv_props = {'serialization.format': ',', 'field.delim': ','}
for i, (year, month) in enumerate(unique_keys.itertuples(index=False)):
chunk = df2[(df.year == year) & (df.month == month)]
chunk_path = pjoin(hdfs_dir, '{}.csv'.format(i))
con.write_dataframe(chunk, chunk_path)
# test both styles of insert
if i:
part = {'year': year, 'month': month}
else:
part = [year, month]
part_t.add_partition(part)
part_t.alter_partition(part, format='text', serde_properties=csv_props)
part_t.load_data(chunk_path, partition=part)
hdfs.rmdir(hdfs_dir)
verify_partitioned_table(part_t, df, unique_keys)
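# Check that the partitioned table round-trips the source frame and reports one partition per unique (year, month) key plus the totals row.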
def verify_partitioned_table(part_t, df, unique_keys):
result = (
part_t.execute()
.sort_values(by='id')
.reset_index(drop=True)[df.columns]
)
tm.assert_frame_equal(result, df)
parts = part_t.partitions()
# allow for the total line
assert len(parts) == len(unique_keys) + 1
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_drop_partition():
assert False
@pytest.mark.xfail(raises=AssertionError, reason='NYT')
def test_repartition_automated():
assert False
| apache-2.0 |
mrustl/flopy | setup.py | 1 | 1530 | import os
import sys
from setuptools import setup
# To use:
# python setup.py bdist --format=wininst
from flopy import __version__, __name__, __author__
# trap someone trying to install flopy with something other
# than python 2 or 3
if not sys.version_info[0] in [2, 3]:
print('Sorry, Flopy not supported in your Python version')
print(' Supported versions: 2 and 3')
print(' Your version of Python: {}'.format(sys.version_info[0]))
sys.exit(1) # return non-zero value for failure
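# Convert the Markdown README to reST for the long_description when pypandoc is available.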
long_description = ''
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except:
pass
setup(name=__name__,
description='FloPy is a Python package to create, run, and post-process MODFLOW-based models.',
long_description=long_description,
author=__author__,
author_email='[email protected], [email protected], [email protected], [email protected], ' +
'[email protected], [email protected], [email protected], [email protected], [email protected]',
url='https://github.com/modflowpy/flopy/',
license='New BSD',
platforms='Windows, Mac OS-X',
install_requires=['numpy>=1.7', 'matplotlib>=1.3'],
packages=['flopy', 'flopy.modflow', 'flopy.modpath', 'flopy.mt3d',
'flopy.seawat', 'flopy.utils', 'flopy.plot', 'flopy.pest',
'flopy.export'],
# use this version ID if .svn data cannot be found
version=__version__)
| bsd-3-clause |
bhermanmit/openmc | openmc/plotter.py | 1 | 37810 | from numbers import Integral, Real
from six import string_types
from itertools import chain
import numpy as np
import openmc.checkvalue as cv
import openmc.data
# Supported keywords for continuous-energy cross section plotting
PLOT_TYPES = ['total', 'scatter', 'elastic', 'inelastic', 'fission',
'absorption', 'capture', 'nu-fission', 'nu-scatter', 'unity',
'slowing-down power', 'damage']
# Supported keywords for multi-group cross section plotting
PLOT_TYPES_MGXS = ['total', 'absorption', 'scatter', 'fission',
'kappa-fission', 'nu-fission', 'prompt-nu-fission',
                   'delayed-nu-fission', 'chi', 'chi-prompt', 'chi-delayed',
'inverse-velocity', 'beta', 'decay rate', 'unity']
# Create a dictionary which can be used to convert PLOT_TYPES_MGXS to the
# openmc.XSdata attribute name needed to access the data
_PLOT_MGXS_ATTR = {line: line.replace(' ', '_').replace('-', '_')
for line in PLOT_TYPES_MGXS}
_PLOT_MGXS_ATTR['scatter'] = 'scatter_matrix'
# Special MT values
UNITY_MT = -1
XI_MT = -2
# MTs to combine to generate associated plot_types
_INELASTIC = [mt for mt in openmc.data.SUM_RULES[3] if mt != 27]
PLOT_TYPES_MT = {'total': openmc.data.SUM_RULES[1],
'scatter': [2] + _INELASTIC,
'elastic': [2],
'inelastic': _INELASTIC,
'fission': [18],
'absorption': [27], 'capture': [101],
'nu-fission': [18],
'nu-scatter': [2] + _INELASTIC,
'unity': [UNITY_MT],
'slowing-down power': [2] + _INELASTIC + [XI_MT],
'damage': [444]}
# Operations to use when combining MTs; the first np.add is applied in
# reference to zero
PLOT_TYPES_OP = {'total': (np.add,),
'scatter': (np.add,) * (len(PLOT_TYPES_MT['scatter']) - 1),
'elastic': (),
'inelastic': (np.add,) * (len(PLOT_TYPES_MT['inelastic']) - 1),
'fission': (), 'absorption': (),
'capture': (), 'nu-fission': (),
'nu-scatter': (np.add,) * (len(PLOT_TYPES_MT['nu-scatter']) - 1),
'unity': (),
'slowing-down power':
(np.add,) * (len(PLOT_TYPES_MT['slowing-down power']) - 2) + (np.multiply,),
'damage': ()}
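# As an illustration of the two tables above: 'elastic' uses the single MT 2
# with no combining operation, 'scatter' sums MT 2 with every inelastic MT
# via np.add, and 'slowing-down power' additionally multiplies that
# scattering sum by the XI_MT pseudo-MT (the average logarithmic energy loss
# per collision computed further below).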
# Types of plots to plot linearly in y
PLOT_TYPES_LINEAR = {'nu-fission / fission', 'nu-scatter / scatter',
'nu-fission / absorption', 'fission / absorption'}
# Minimum and maximum energies for plotting (units of eV)
_MIN_E = 1.e-5
_MAX_E = 20.e6
def plot_xs(this, types, divisor_types=None, temperature=294., axis=None,
sab_name=None, ce_cross_sections=None, mg_cross_sections=None,
enrichment=None, plot_CE=True, orders=None, divisor_orders=None,
**kwargs):
"""Creates a figure of continuous-energy cross sections for this item.
Parameters
----------
this : openmc.Element, openmc.Nuclide, or openmc.Material
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to include in the plot.
divisor_types : Iterable of values of PLOT_TYPES, optional
Cross section types which will divide those produced by types
before plotting. A type of 'unity' can be used to effectively not
divide some types.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
axis : matplotlib.axes, optional
A previously generated axis to use for plotting. If not specified,
a new axis and figure will be generated.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable; only used
for items which are instances of openmc.Element or openmc.Nuclide
ce_cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
mg_cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None. This is only used for
items which are instances of openmc.Element
plot_CE : bool, optional
Denotes whether or not continuous-energy will be plotted. Defaults to
plotting the continuous-energy data.
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data. This only applies to plots of
multi-group data.
divisor_orders : Iterable of Integral, optional
Same as orders, but for divisor_types
**kwargs
All keyword arguments are passed to
:func:`matplotlib.pyplot.figure`.
Returns
-------
fig : matplotlib.figure.Figure
If axis is None, then a Matplotlib Figure of the generated
cross section will be returned. Otherwise, a value of
None will be returned as the figure and axes have already been
generated.
"""
from matplotlib import pyplot as plt
cv.check_type("plot_CE", plot_CE, bool)
if isinstance(this, openmc.Nuclide):
data_type = 'nuclide'
elif isinstance(this, openmc.Element):
data_type = 'element'
elif isinstance(this, openmc.Material):
data_type = 'material'
elif isinstance(this, openmc.Macroscopic):
data_type = 'macroscopic'
else:
raise TypeError("Invalid type for plotting")
if plot_CE:
# Calculate for the CE cross sections
E, data = calculate_cexs(this, types, temperature, sab_name,
ce_cross_sections, enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
Ediv, data_div = calculate_cexs(this, divisor_types, temperature,
sab_name, ce_cross_sections,
enrichment)
# Create a new union grid, interpolate data and data_div on to that
# grid, and then do the actual division
Enum = E[:]
E = np.union1d(Enum, Ediv)
data_new = np.zeros((len(types), len(E)))
for line in range(len(types)):
data_new[line, :] = \
np.divide(np.interp(E, Enum, data[line, :]),
np.interp(E, Ediv, data_div[line, :]))
if divisor_types[line] != 'unity':
types[line] = types[line] + ' / ' + divisor_types[line]
data = data_new
else:
# Calculate for MG cross sections
E, data = calculate_mgxs(this, types, orders, temperature,
mg_cross_sections, ce_cross_sections,
enrichment)
if divisor_types:
cv.check_length('divisor types', divisor_types, len(types))
Ediv, data_div = calculate_mgxs(this, divisor_types,
divisor_orders, temperature,
mg_cross_sections,
ce_cross_sections, enrichment)
# Perform the division
for line in range(len(types)):
data[line, :] /= data_div[line, :]
if divisor_types[line] != 'unity':
types[line] += ' / ' + divisor_types[line]
# Generate the plot
if axis is None:
fig = plt.figure(**kwargs)
ax = fig.add_subplot(111)
else:
fig = None
ax = axis
# Set to loglog or semilogx depending on if we are plotting a data
# type which we expect to vary linearly
if set(types).issubset(PLOT_TYPES_LINEAR):
plot_func = ax.semilogx
else:
plot_func = ax.loglog
# Plot the data
for i in range(len(data)):
data[i, :] = np.nan_to_num(data[i, :])
if np.sum(data[i, :]) > 0.:
plot_func(E, data[i, :], label=types[i])
ax.set_xlabel('Energy [eV]')
if plot_CE:
ax.set_xlim(_MIN_E, _MAX_E)
else:
ax.set_xlim(E[-1], E[0])
if divisor_types:
if data_type == 'nuclide':
ylabel = 'Nuclidic Microscopic Data'
elif data_type == 'element':
ylabel = 'Elemental Microscopic Data'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Data'
else:
if data_type == 'nuclide':
ylabel = 'Microscopic Cross Section [b]'
elif data_type == 'element':
ylabel = 'Elemental Cross Section [b]'
elif data_type == 'material' or data_type == 'macroscopic':
ylabel = 'Macroscopic Cross Section [1/cm]'
ax.set_ylabel(ylabel)
ax.legend(loc='best')
if this.name is not None and this.name != '':
if len(types) > 1:
ax.set_title('Cross Sections for ' + this.name)
else:
ax.set_title('Cross Section for ' + this.name)
return fig
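# A minimal usage sketch for plot_xs (illustrative only; the nuclide name,
# requested types, and output filename are assumptions, not part of this
# module):
#
#     u235 = openmc.Nuclide('U235')
#     fig = plot_xs(u235, ['total', 'elastic', 'fission'])
#     fig.savefig('u235_xs.png')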
def calculate_cexs(this, types, temperature=294., sab_name=None,
cross_sections=None, enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Element, openmc.Nuclide, or openmc.Material
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if sab_name:
cv.check_type('sab_name', sab_name, string_types)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
if isinstance(this, openmc.Nuclide):
energy_grid, xs = _calculate_cexs_nuclide(this, types, temperature,
sab_name, cross_sections)
# Convert xs (Iterable of Callable) to a grid of cross section values
        # calculated at the points in energy_grid for consistency with the
# element and material functions.
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
data[line, :] = xs[line](energy_grid)
elif isinstance(this, openmc.Element):
energy_grid, data = _calculate_cexs_elem_mat(this, types, temperature,
cross_sections, sab_name,
enrichment)
elif isinstance(this, openmc.Material):
energy_grid, data = _calculate_cexs_elem_mat(this, types, temperature,
cross_sections)
else:
raise TypeError("Invalid type")
return energy_grid, data
def _calculate_cexs_nuclide(this, types, temperature=294., sab_name=None,
cross_sections=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : openmc.Nuclide
Nuclide object to source data from
types : Iterable of str or Integral
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES or integers which correspond to reaction
channel (MT) numbers.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : Iterable of Callable
Requested cross section functions
"""
# Parse the types
mts = []
ops = []
yields = []
for line in types:
if line in PLOT_TYPES:
mts.append(PLOT_TYPES_MT[line])
if line.startswith('nu'):
yields.append(True)
else:
yields.append(False)
ops.append(PLOT_TYPES_OP[line])
else:
# Not a built-in type, we have to parse it ourselves
cv.check_type('MT in types', line, Integral)
cv.check_greater_than('MT in types', line, 0)
mts.append((line,))
ops.append(())
yields.append(False)
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
# Convert temperature to format needed for access in the library
strT = "{}K".format(int(round(temperature)))
T = temperature
# Now we can create the data sets to be plotted
energy_grid = []
xs = []
lib = library.get_by_material(this.name)
if lib is not None:
nuc = openmc.data.IncidentNeutron.from_hdf5(lib['path'])
# Obtain the nearest temperature
if strT in nuc.temperatures:
nucT = strT
else:
delta_T = np.array(nuc.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
nucT = nuc.temperatures[closest_index]
# Prep S(a,b) data if needed
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
# Obtain the nearest temperature
if strT in sab.temperatures:
sabT = strT
else:
delta_T = np.array(sab.kTs) - T * openmc.data.K_BOLTZMANN
closest_index = np.argmin(np.abs(delta_T))
sabT = sab.temperatures[closest_index]
            # Create an energy grid composed of the S(a,b) and the nuclide's grids
grid = nuc.energy[nucT]
sab_Emax = 0.
sab_funcs = []
if sab.elastic_xs:
elastic = sab.elastic_xs[sabT]
if isinstance(elastic, openmc.data.CoherentElastic):
grid = np.union1d(grid, elastic.bragg_edges)
if elastic.bragg_edges[-1] > sab_Emax:
sab_Emax = elastic.bragg_edges[-1]
elif isinstance(elastic, openmc.data.Tabulated1D):
grid = np.union1d(grid, elastic.x)
if elastic.x[-1] > sab_Emax:
sab_Emax = elastic.x[-1]
sab_funcs.append(elastic)
if sab.inelastic_xs:
inelastic = sab.inelastic_xs[sabT]
grid = np.union1d(grid, inelastic.x)
if inelastic.x[-1] > sab_Emax:
sab_Emax = inelastic.x[-1]
sab_funcs.append(inelastic)
energy_grid = grid
else:
energy_grid = nuc.energy[nucT]
for i, mt_set in enumerate(mts):
# Get the reaction xs data from the nuclide
funcs = []
op = ops[i]
for mt in mt_set:
if mt == 2:
if sab_name:
# Then we need to do a piece-wise function of
                        # the S(a,b) and non-thermal data
sab_sum = openmc.data.Sum(sab_funcs)
pw_funcs = openmc.data.Regions1D(
[sab_sum, nuc[mt].xs[nucT]],
[sab_Emax])
funcs.append(pw_funcs)
else:
funcs.append(nuc[mt].xs[nucT])
elif mt in nuc:
if yields[i]:
# Get the total yield first if available. This will be
# used primarily for fission.
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode == 'total':
func = openmc.data.Combination(
[nuc[mt].xs[nucT], prod.yield_],
[np.multiply])
funcs.append(func)
break
else:
# Total doesn't exist so we have to create from
# prompt and delayed. This is used for scatter
# multiplication.
func = None
for prod in chain(nuc[mt].products,
nuc[mt].derived_products):
if prod.particle == 'neutron' and \
prod.emission_mode != 'total':
if func:
func = openmc.data.Combination(
[prod.yield_, func], [np.add])
else:
func = prod.yield_
if func:
funcs.append(openmc.data.Combination(
[func, nuc[mt].xs[nucT]], [np.multiply]))
else:
# If func is still None, then there were no
# products. In that case, assume the yield is
                            # one as it's not provided for some summed
# reactions like MT=4
funcs.append(nuc[mt].xs[nucT])
else:
funcs.append(nuc[mt].xs[nucT])
elif mt == UNITY_MT:
funcs.append(lambda x: 1.)
elif mt == XI_MT:
awr = nuc.atomic_weight_ratio
alpha = ((awr - 1.) / (awr + 1.))**2
xi = 1. + alpha * np.log(alpha) / (1. - alpha)
funcs.append(lambda x: xi)
else:
funcs.append(lambda x: 0.)
xs.append(openmc.data.Combination(funcs, op))
else:
raise ValueError(this.name + " not in library")
return energy_grid, xs
def _calculate_cexs_elem_mat(this, types, temperature=294.,
cross_sections=None, sab_name=None,
enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
Parameters
----------
this : {openmc.Material, openmc.Element}
Object to source data from
types : Iterable of values of PLOT_TYPES
The type of cross sections to calculate
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of cross_sections.xml file. Default is None.
sab_name : str, optional
Name of S(a,b) library to apply to MT=2 data when applicable.
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
else:
T = temperature
# Load the library
library = openmc.data.DataLibrary.from_xml(cross_sections)
if isinstance(this, openmc.Material):
        # Expand elements into nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[1][0].name: nuclide[1][1]
for nuclide in nuclides.items()}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[1][0].name: nuclide[1][0]
for nuclide in nuclides.items()}
else:
        # Expand elements into nuclides with atomic densities
nuclides = this.expand(1., 'ao', enrichment=enrichment,
cross_sections=cross_sections)
# For ease of processing split out the nuclide and its fraction
nuc_fractions = {nuclide[0].name: nuclide[1] for nuclide in nuclides}
# Create a dict of [nuclide name] = nuclide object to carry forward
# with a common nuclides format between openmc.Material and
# openmc.Element objects
nuclides = {nuclide[0].name: nuclide[0] for nuclide in nuclides}
# Identify the nuclides which have S(a,b) data
sabs = {}
for nuclide in nuclides.items():
sabs[nuclide[0]] = None
if isinstance(this, openmc.Material):
for sab_name in this._sab:
sab = openmc.data.ThermalScattering.from_hdf5(
library.get_by_material(sab_name)['path'])
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
else:
if sab_name:
sab = openmc.data.ThermalScattering.from_hdf5(sab_name)
for nuc in sab.nuclides:
sabs[nuc] = library.get_by_material(sab_name)['path']
# Now we can create the data sets to be plotted
xs = {}
E = []
for nuclide in nuclides.items():
name = nuclide[0]
nuc = nuclide[1]
sab_tab = sabs[name]
temp_E, temp_xs = calculate_cexs(nuc, types, T, sab_tab,
cross_sections)
E.append(temp_E)
# Since the energy grids are different, store the cross sections as
# a tabulated function so they can be calculated on any grid needed.
xs[name] = [openmc.data.Tabulated1D(temp_E, temp_xs[line])
for line in range(len(types))]
# Condense the data for every nuclide
# First create a union energy grid
energy_grid = E[0]
for grid in E[1:]:
energy_grid = np.union1d(energy_grid, grid)
# Now we can combine all the nuclidic data
data = np.zeros((len(types), len(energy_grid)))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for nuclide in nuclides.items():
name = nuclide[0]
data[line, :] += (nuc_fractions[name] *
xs[name][line](energy_grid))
return energy_grid, data
def calculate_mgxs(this, types, orders=None, temperature=294.,
cross_sections=None, ce_cross_sections=None,
enrichment=None):
"""Calculates continuous-energy cross sections of a requested type.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Element, openmc.Nuclide, openmc.Material, or openmc.Macroscopic
Object to source data from
types : Iterable of values of PLOT_TYPES_MGXS
The type of cross sections to calculate
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
cross_sections : str, optional
Location of MGXS HDF5 Library file. Default is None.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding an openmc.Element object passed as this
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
energy_grid : numpy.ndarray
Energies at which cross sections are calculated, in units of eV
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check types
cv.check_type('temperature', temperature, Real)
if enrichment:
cv.check_type('enrichment', enrichment, Real)
cv.check_iterable_type('types', types, string_types)
cv.check_type("cross_sections", cross_sections, str)
library = openmc.MGXSLibrary.from_hdf5(cross_sections)
if isinstance(this, (openmc.Nuclide, openmc.Macroscopic)):
mgxs = _calculate_mgxs_nuc_macro(this, types, library, orders,
temperature)
elif isinstance(this, (openmc.Element, openmc.Material)):
mgxs = _calculate_mgxs_elem_mat(this, types, library, orders,
temperature, ce_cross_sections,
enrichment)
else:
raise TypeError("Invalid type")
# Convert the data to the format needed
data = np.zeros((len(types), 2 * library.energy_groups.num_groups))
energy_grid = np.zeros(2 * library.energy_groups.num_groups)
for g in range(library.energy_groups.num_groups):
energy_grid[g * 2: g * 2 + 2] = \
library.energy_groups.group_edges[g: g + 2]
# Ensure the energy will show on a log-axis by replacing 0s with a
# sufficiently small number
energy_grid[0] = max(energy_grid[0], _MIN_E)
for line in range(len(types)):
for g in range(library.energy_groups.num_groups):
data[line, g * 2: g * 2 + 2] = mgxs[line, g]
return energy_grid[::-1], data
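# A hedged sketch of calling calculate_mgxs (the nuclide name and MGXS
# library filename are placeholders, not part of this module):
#
#     u235 = openmc.Nuclide('U235')
#     energy, xs = calculate_mgxs(u235, ['total', 'fission'],
#                                 cross_sections='mgxs.h5')
#     # energy repeats each group edge twice, so (energy, xs) plot as steps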
def _calculate_mgxs_nuc_macro(this, types, library, orders=None,
temperature=294.):
"""Determines the multi-group cross sections of a nuclide or macroscopic
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Nuclide or openmc.Macroscopic
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
# Check the parameters and grab order/delayed groups
if orders:
cv.check_iterable_type('orders', orders, Integral,
min_depth=len(types), max_depth=len(types))
else:
orders = [None] * len(types)
for i, line in enumerate(types):
cv.check_type("line", line, str)
cv.check_value("line", line, PLOT_TYPES_MGXS)
if orders[i]:
cv.check_greater_than("order value", orders[i], 0, equality=True)
xsdata = library.get_by_name(this.name)
if xsdata is not None:
# Obtain the nearest temperature
t = np.abs(xsdata.temperatures - temperature).argmin()
# Get the data
data = np.zeros((len(types), library.energy_groups.num_groups))
for i, line in enumerate(types):
if 'fission' in line and not xsdata.fissionable:
continue
elif line == 'unity':
data[i, :] = 1.
else:
# Now we have to get the cross section data and properly
# treat it depending on the requested type.
# First get the data in a generic fashion
temp_data = getattr(xsdata, _PLOT_MGXS_ATTR[line])[t]
shape = temp_data.shape[:]
                # If we have angular data, then we want the geometric
# average over all provided angles. Since the angles are
# equi-distant, un-weighted averaging will suffice
if xsdata.representation == 'angle':
temp_data = np.mean(temp_data, axis=(0, 1))
# Now we can look at the shape of the data to identify how
# it should be modified to produce an array of values
# with groups.
if shape in (xsdata.xs_shapes["[G']"],
xsdata.xs_shapes["[G]"]):
# Then the data is already an array vs groups so copy
# and move along
data[i, :] = temp_data
elif shape == xsdata.xs_shapes["[G][G']"]:
# Sum the data over outgoing groups to create our array vs
# groups
data[i, :] = np.sum(temp_data, axis=1)
elif shape == xsdata.xs_shapes["[DG]"]:
# Then we have a constant vs groups with a value for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i]]
else:
data[i, :] = np.sum(temp_data[:])
elif shape in (xsdata.xs_shapes["[DG][G']"],
xsdata.xs_shapes["[DG][G]"]):
# Then we have an array vs groups with values for each
# delayed group. The user-provided value of orders tells us
# which delayed group we want. If none are provided, then
# we sum all the delayed groups together.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[DG][G][G']"]:
# Then we have a delayed group matrix. We will first
# remove the outgoing group dependency
temp_data = np.sum(temp_data, axis=-1)
# And then proceed in exactly the same manner as the
# "[DG][G']" or "[DG][G]" shapes in the previous block.
if orders[i]:
                        if orders[i] < shape[0]:
data[i, :] = temp_data[orders[i], :]
else:
data[i, :] = np.sum(temp_data[:, :], axis=0)
elif shape == xsdata.xs_shapes["[G][G'][Order]"]:
# This is a scattering matrix with angular data
# First remove the outgoing group dependence
temp_data = np.sum(temp_data, axis=1)
# The user either provided a specific order or we resort
# to the default 0th order
if orders[i]:
order = orders[i]
else:
order = 0
# If the order is available, store the data for that order
# if it is not available, then the expansion coefficient
# is zero and thus we already have the correct value.
if order < shape[1]:
data[i, :] = temp_data[:, order]
else:
raise ValueError("{} not present in provided MGXS "
"library".format(this.name))
return data
def _calculate_mgxs_elem_mat(this, types, library, orders=None,
temperature=294., ce_cross_sections=None,
enrichment=None):
"""Determines the multi-group cross sections of an element or material
object.
If the data for the nuclide or macroscopic object in the library is
represented as angle-dependent data then this method will return the
geometric average cross section over all angles.
Parameters
----------
this : openmc.Element or openmc.Material
Object to source data from
types : Iterable of str
The type of cross sections to calculate; values can either be those
in openmc.PLOT_TYPES_MGXS
library : openmc.MGXSLibrary
MGXS Library containing the data of interest
orders : Iterable of Integral, optional
The scattering order or delayed group index to use for the
corresponding entry in types. Defaults to the 0th order for scattering
and the total delayed neutron data.
temperature : float, optional
Temperature in Kelvin to plot. If not specified, a default
temperature of 294K will be plotted. Note that the nearest
temperature in the library for each nuclide will be used as opposed
to using any interpolation.
ce_cross_sections : str, optional
Location of continuous-energy cross_sections.xml file. Default is None.
This is used only for expanding the elements
enrichment : float, optional
Enrichment for U235 in weight percent. For example, input 4.95 for
4.95 weight percent enriched U. Default is None
(natural composition).
Returns
-------
data : numpy.ndarray
Cross sections calculated at the energy grid described by energy_grid
"""
if isinstance(this, openmc.Material):
if this.temperature is not None:
T = this.temperature
else:
T = temperature
        # Check to see if we have nuclides/elements or a macroscopic object
if this._macroscopic is not None:
# We have macroscopics
nuclides = {this._macroscopic: (this._macroscopic, this.density)}
else:
            # Expand elements into nuclides with atomic densities
nuclides = this.get_nuclide_atom_densities()
# For ease of processing split out nuc and nuc_density
nuc_fraction = [nuclide[1][1] for nuclide in nuclides.items()]
else:
T = temperature
        # Expand elements into nuclides with atomic densities
nuclides = this.expand(100., 'ao', enrichment=enrichment,
cross_sections=ce_cross_sections)
# For ease of processing split out nuc and nuc_fractions
nuc_fraction = [nuclide[1] for nuclide in nuclides]
nuc_data = []
for nuclide in nuclides.items():
nuc_data.append(_calculate_mgxs_nuc_macro(nuclide[0], types, library,
orders, T))
# Combine across the nuclides
data = np.zeros((len(types), library.energy_groups.num_groups))
for line in range(len(types)):
if types[line] == 'unity':
data[line, :] = 1.
else:
for n in range(len(nuclides)):
data[line, :] += nuc_fraction[n] * nuc_data[n][line, :]
return data
| mit |
glennq/scikit-learn | examples/cluster/plot_mean_shift.py | 351 | 1793 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
taknevski/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
"""Test class for `Sum` transform."""
def testSum(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a+b"] = frame["a"] + frame["b"]
expected_sum = pandas_df["a"] + pandas_df["b"]
actual_sum = frame.run_one_batch()["a+b"]
np.testing.assert_array_equal(expected_sum, actual_sum)
class DifferenceTestCase(test.TestCase):
"""Test class for `Difference` transform."""
def testDifference(self):
if not HAS_PANDAS:
return
num_rows = 100
pandas_df = pd.DataFrame({
"a": np.arange(num_rows),
"b": np.arange(num_rows, 2 * num_rows)
})
frame = df.TensorFlowDataFrame.from_pandas(
pandas_df, shuffle=False, batch_size=num_rows)
frame["a-b"] = frame["a"] - frame["b"]
expected_diff = pandas_df["a"] - pandas_df["b"]
actual_diff = frame.run_one_batch()["a-b"]
np.testing.assert_array_equal(expected_diff, actual_diff)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kylerbrown/bark | bark/tools/datenvclassify.py | 2 | 5284 | import bark
import numpy as np
import pandas as pd
import os.path
from scipy.signal import filtfilt, butter
from scipy.io import wavfile
def abs_and_smooth(x, sr, lp=100):
abs_x = np.abs(x)
if len(x.shape) > 1:
abs_x = np.sum(abs_x,
                       axis=-1)  # sum over last dimension, e.g. over channels
b, a = butter(3, lp / 2 / sr, btype="low")
filtered_x = filtfilt(b, a, abs_x, axis=0)
return filtered_x
def myresample(x, old_sr, new_sr):
'''a dumb resampler that uses linear interpolation'''
duration = len(x) / old_sr
old_sample_times = np.arange(0, duration, 1 / old_sr)
new_sample_times = np.arange(0, duration, 1 / new_sr)
return np.interp(new_sample_times, old_sample_times, x)
def amplitude(x, sr, new_sr):
'''finds amplitude, resamples and demeans'''
x_amp = abs_and_smooth(x, sr)
x_resamp = myresample(x_amp, sr, new_sr)
return x_resamp
def wav_envelopes(wavnames, new_sr=22050):
    '''From a list of wav files, find the amplitude envelopes and resample them.
Returns --
names: a list of stimulus names
envelopes: a list of amplitude envelopes
'''
names = []
envelopes = []
for wav in wavnames:
name = os.path.splitext(os.path.basename(wav))[0]
names.append(name)
wavsr, wavdata = wavfile.read(wav)
amp_env = amplitude(wavdata, wavsr, new_sr)
envelopes.append(amp_env)
return names, envelopes
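# Example (hedged; the wav filenames below are placeholders):
#     names, envs = wav_envelopes(['stim_a.wav', 'stim_b.wav'])
# returns the stimulus base names and their smoothed amplitude envelopes at
# 22050 Hz, ready to be matched against the microphone channel below.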
def classify_stimuli(mic_data, mic_sr, starts, wav_names, wav_envs, common_sr):
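    '''Match each trigger time to the stimulus whose amplitude envelope
    correlates best (Pearson r) with the microphone recording.
    mic_data: raw microphone samples
    mic_sr: microphone sampling rate in Hz
    starts: stimulus onset times in seconds
    wav_names: names of the candidate stimuli
    wav_envs: their amplitude envelopes, sampled at common_sr
    common_sr: sampling rate used for the envelope comparison
    Returns a list of best-matching stimulus names, one per usable start
    (starts too close to the end of the file are skipped).'''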
    # get the longest stimulus to determine how much data to grab
max_stim_duration = int(max([len(x) for x in wav_envs]) / common_sr *
mic_sr)
max_stim_dur_common_sr = max([len(x) for x in wav_envs])
padded_wav_envs = []
for y in wav_envs:
pad = np.zeros(max_stim_dur_common_sr)
pad[:len(y)] = y
padded_wav_envs.append(pad)
# convert trigfile starts to samples
start_samps = np.array(starts * mic_sr, dtype=int)
labels = []
for start_samp in start_samps:
x = amplitude(mic_data[start_samp:start_samp + max_stim_duration],
mic_sr, common_sr)
if len(x) < max_stim_dur_common_sr:
print('skipping {} ... too close to end of file'.format(start_samp
/ mic_sr))
continue
inner_prods = [pearson_r(x, y) for y in padded_wav_envs]
best_match = wav_names[np.argmax(inner_prods)]
labels.append(best_match)
return labels
def pearson_r(x, y):
return np.mean((x - np.mean(x)) *
(y - np.mean(y))) / (np.std(x) * np.std(y))
def get_stops(labels, starts, stim_names, stim_envs, sr):
'''
    labels: sequence of identified stimulus names
    starts: sequence of stimulus onset times, in seconds
    stim_names: a vector of all stimulus names
    stim_envs: a corresponding vector of stimulus envelopes
Returns a vector of times, indicating when the stimuli ended.'''
length_lookup = {name: len(env) / sr
for name, env in zip(stim_names, stim_envs)}
stops = [start + length_lookup[name]
for start, name in zip(starts, labels)]
return stops
def write(outfile, starts, stops, labels):
if len(labels) < len(starts):
print('warning, discarding {} events'.format(len(starts) - len(
labels)))
starts = starts[:len(labels)]
outdset = pd.DataFrame(dict(start=starts, stop=stops, name=labels))
columns = {'start': {'units': 's'},
'stop': {'units': 's'},
'name': {'units': None}}
bark.write_events(outfile, outdset, columns=columns)
def main(datfile, trigfile, outfile, wavfiles):
common_sr = 22050 # everything is resampled to this
# get wav envelopes
stim_names, stim_envs = wav_envelopes(wavfiles, common_sr)
mic_dset = bark.read_sampled(datfile)
mic_sr = mic_dset.sampling_rate
starts = bark.read_events(trigfile).data.start
# get most likely stimulus for each trigger time
labels = classify_stimuli(mic_dset.data, mic_sr, starts, stim_names,
stim_envs, common_sr)
stops = get_stops(labels, starts, stim_names, stim_envs, common_sr)
write(outfile, starts, stops, labels)
def _run():
''' Function for getting commandline args.'''
import argparse
p = argparse.ArgumentParser(description='''
Classify acoustic events by amplitude envelope. Uses a set of WAV files as
templates. Useful for recovering the identity of acoustic stimuli, when
their amplitude envelopes are significantly different. If not, use the stimulus
log to reconstruct stimulus identity.
''')
p.add_argument('dat', help='name of a sampled dataset')
p.add_argument('trig',
help='name of an event dataset containing stimuli times')
p.add_argument('out', help='name of output event dataset')
p.add_argument('-w',
'--wavs',
nargs='+',
help='source stimulus wav files',
required=True)
args = p.parse_args()
main(args.dat, args.trig, args.out, args.wavs)
if __name__ == '__main__':
_run()
| gpl-2.0 |
prisae/empymod | examples/reproducing/constable2006.py | 1 | 3538 | """
Constable and Weiss, 2006
=========================
Reproducing Figure 3 of Constable and Weiss, 2006, Geophysics. This is a marine
CSEM example.
**Reference**
- **Constable, S., and C. J. Weiss, 2006**, Mapping thin resistors and
hydrocarbons with marine EM methods: Insights from 1D modeling: Geophysics,
71, G43-G51; DOI: `10.1190/1.2187748 <http://dx.doi.org/10.1190/1.2187748>`_.
"""
import empymod
import numpy as np
from copy import deepcopy as dc
import matplotlib.pyplot as plt
###############################################################################
# Computation
# -----------
#
# Note: Exact reproduction is not possible, as source and receiver depths are
# not explicitly specified in the publication. I made a few checks, and it
# looks like a source-depth of 900 meter gives good accordance. Receivers are
# on the sea-floor.
# Offsets
x = np.linspace(0, 20000, 101)
# TG model
inp3 = {'src': [0, 0, 900],
'rec': [x, np.zeros(x.shape), 1000],
'depth': [0, 1000, 2000, 2100],
'res': [2e14, 0.3, 1, 100, 1],
'freqtime': 1,
'verb': 1}
# HS model
inp4 = dc(inp3)
inp4['depth'] = inp3['depth'][:2]
inp4['res'] = inp3['res'][:3]
# Compute radial responses
rhs = empymod.dipole(**inp4) # Step, HS
rhs = empymod.utils.EMArray(np.nan_to_num(rhs))
rtg = empymod.dipole(**inp3) # " " Target
rtg = empymod.utils.EMArray(np.nan_to_num(rtg))
# Compute azimuthal response
ahs = empymod.dipole(**inp4, ab=22) # Step, HS
ahs = empymod.utils.EMArray(np.nan_to_num(ahs))
atg = empymod.dipole(**inp3, ab=22) # " " Target
atg = empymod.utils.EMArray(np.nan_to_num(atg))
###############################################################################
# Plot
# ----
plt.figure(figsize=(9, 13))
plt.subplots_adjust(wspace=.3, hspace=.3)
# Radial amplitude
plt.subplot(321)
plt.title('(a) Radial mode fields')
plt.plot(x/1000, np.log10(rtg.amp()), 'k', label='Model')
plt.plot(x/1000, np.log10(rhs.amp()), 'k-.', label='Half-space response')
plt.axis([0, 20, -18, -8])
plt.xlabel('Range (km)')
plt.ylabel(r'Log$_{10}$(E-field magnitude, V/Am$^2$)')
plt.legend()
# Radial phase
plt.subplot(323)
plt.title('(b) Radial mode phase')
plt.plot(x/1000, rtg.pha(deg=True), 'k')
plt.plot(x/1000, rhs.pha(deg=True), 'k-.')
plt.axis([0, 20, -500, 0])
plt.xlabel('Range (km)')
plt.ylabel('Phase (degrees)')
# Azimuthal amplitude
plt.subplot(325)
plt.title('(c) Azimuthal mode fields')
plt.plot(x/1000, np.log10(atg.amp()), 'k', label='Model')
plt.plot(x/1000, np.log10(ahs.amp()), 'k-.', label='Half-space response')
plt.axis([0, 20, -18, -8])
plt.xlabel('Range (km)')
plt.ylabel(r'Log$_{10}$(E-field magnitude, V/Am$^2$)')
plt.legend()
# Azimuthal phase
plt.subplot(322)
plt.title('(d) Azimuthal mode phase')
plt.plot(x/1000, atg.pha(deg=True)+180, 'k')
plt.plot(x/1000, ahs.pha(deg=True)+180, 'k-.')
plt.axis([0, 20, -500, 0])
plt.xlabel('Range (km)')
plt.ylabel('Phase (degrees)')
# Normalized
plt.subplot(324)
plt.title('(e) Normalized E-field magnitude')
plt.plot(x/1000, np.abs(rtg/rhs), 'k', label='Radial')
plt.plot(x/1000, np.abs(atg/ahs), 'k--', label='Azimuthal')
plt.axis([0, 20, 0, 70])
plt.xlabel('Range (km)')
plt.legend()
plt.show()
###############################################################################
# Original Figure
# ---------------
#
# Figure 3 of Constable and Weiss, 2006, Geophysics:
#
# .. image:: ../../_static/figures/Constable2006.jpg
#
###############################################################################
empymod.Report()
| apache-2.0 |
Nyker510/scikit-learn | sklearn/feature_extraction/text.py | 36 | 49753 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
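# A brief usage sketch for HashingVectorizer (illustrative only, not part of
# the module; the documents and n_features value are arbitrary):
#
#     hv = HashingVectorizer(n_features=2 ** 18, stop_words='english')
#     X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
#     # X is a sparse (2, 2 ** 18) matrix of l2-normalized token counts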
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of string or bytes
        items that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non zero in more documents than high or in
        fewer documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
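# A minimal, hedged usage sketch of the vectorizer above on a made-up
# two-document corpus; the expected feature names follow from the default
# word analyzer and token pattern.
def _example_count_vectorizer():
    docs = ["the cat sat", "the cat sat on the mat"]
    cv = CountVectorizer()
    X = cv.fit_transform(docs)
    # cv.get_feature_names() -> ['cat', 'mat', 'on', 'sat', 'the']
    return cv.get_feature_names(), X.toarray()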
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
            # in-place *= is not supported for this sparse product, so rebind X
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
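# A small, hedged numeric illustration of the smoothed idf computed above:
# with smooth_idf=True the weight is log((1 + n_samples) / (1 + df)) + 1, so a
# term present in every document gets idf exactly 1.0. The count matrix below
# is made up for illustration.
def _example_tfidf_transformer():
    counts = sp.csr_matrix([[3, 0, 1],
                            [2, 0, 0],
                            [3, 0, 0],
                            [4, 0, 0]])
    transformer = TfidfTransformer(smooth_idf=True, sublinear_tf=False)
    transformer.fit(counts)
    return transformer.idf_  # idf_[0] == 1.0 because term 0 occurs in all 4 docs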
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of string or bytes
        items that are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
        If float, the parameter represents a proportion of documents; if an
        integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
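# A minimal, hedged end-to-end sketch of the combined vectorizer above; the
# corpus is made up for illustration, and rows of the returned matrix are
# l2-normalized tf-idf vectors.
def _example_tfidf_vectorizer():
    docs = ["the cat sat", "the dog barked", "the cat barked"]
    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(docs)
    # vectorizer.get_feature_names() -> ['barked', 'cat', 'dog', 'sat', 'the']
    return X.shape  # (3, 5)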
| bsd-3-clause |
fzheng/codejam | lib/python2.7/site-packages/IPython/core/pylabtools.py | 1 | 13845 | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'inline' : 'module://ipykernel.pylab.backend_inline'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
    but the function continues processing the remaining figures.
Parameters
----------
    fig_nums : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = rcParams['savefig.dpi']
if dpi == 'figure':
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
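# A hedged usage sketch of the two helpers above; the tiny plot is made up and
# a non-interactive use is assumed (any installed matplotlib backend will do).
def _example_print_figure():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    png_bytes = print_figure(fig, fmt='png')  # raw PNG bytes
    png2x, meta = retina_figure(fig)          # pixel-doubled PNG plus size metadata
    plt.close(fig)
    return len(png_bytes), meta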
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pylab as pylab
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if pylab.draw_if_interactive.called:
pylab.draw()
pylab.draw_if_interactive.called = False
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
==========
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
from ipykernel.pylab import backend_inline
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
if matplotlib.get_backend().lower() == 'nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
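# A hedged sketch of the gui/backend resolution above; the requested gui names
# are example values, and the returned backends follow from the module-level
# `backends` mapping.
def _example_find_gui_and_backend():
    gui, backend = find_gui_and_backend('qt')  # -> ('qt', 'Qt4Agg')
    gui_inline, backend_inline = find_gui_and_backend('inline')
    return gui, backend, gui_inline, backend_inline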
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
from matplotlib import pyplot
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = pyplot.rcParams[k]
# load inline_rc
pyplot.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
pyplot.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
    # only enable the formats once -> don't change the enabled formats (which the user
    # may have changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
| mit |
procoder317/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init' +
      ' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
arcyfelix/ML-DL-AI | Supervised Learning/Image Recognition/SimpleCNN/import_data.py | 1 | 3739 | import pandas as pd
import numpy as np
from tflearn.data_utils import to_categorical
def import_csv(file_path, shuffle = False):
data = pd.read_csv(file_path)
print('*' * 70)
print('Import CSV file has been successful!')
if shuffle == True:
        data = data.reindex(np.random.permutation(data.index))
print('The data has been shuffled!')
else:
print('The data has not been shuffled!')
return data
def labels_info(output_data):
labels_names = np.unique(output_data)
number_of_labels = labels_names.shape[0]
print('*' * 70)
print("Number of uniques categories:", number_of_labels)
labels_as_numbers = np.arange(number_of_labels)
print("Categories as numbers", labels_as_numbers)
for _ in labels_as_numbers:
print('Category ' + str(_) + ' is ' + str(labels_names[_]))
return number_of_labels
def labels_as_numbers(output_data):
_, output_data_as_numbers = np.unique(output_data, return_inverse=True)
return output_data_as_numbers
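# A hedged mini-example of the label encoding helper above; the labels are
# made up for illustration.
def example_labels_as_numbers():
    labels = np.array(['cat', 'dog', 'cat', 'bird'])
    encoded = labels_as_numbers(labels)  # -> array([1, 2, 1, 0])
    return encoded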
# -------------------------------------------------------------------------------
# Acquiring the data
def get_data_MNIST():
folder = 'Digit Recognizer'
file_name = 'train.csv'
specific_dataset_source = folder + '/' + file_name
output_columns = ['label']
data = import_csv(specific_dataset_source, shuffle = True)
# Data split into the input and output
x_data = data
y_data = np.array(data.pop('label'))
print('Shape of the input data:', x_data.shape)
print('Shape of the output data:', y_data.shape)
# Standalization
x_data = x_data / 255
num_samples = x_data.shape[0]
input_features = x_data.shape[1]
print('Number of samples:', num_samples)
print('Number of the input features:', input_features)
y_data_as_numbers = labels_as_numbers(y_data)
# Cross validation data preparation
split_percentage = 80
split_index = int(x_data.shape[0]/(100/split_percentage))
x_train = np.array(x_data[:split_index])
x_val = np.array(x_data[split_index:])
y_train = np.array(y_data_as_numbers[:split_index])
y_val = np.array(y_data_as_numbers[split_index:])
# Information about the data
print(x_train.shape)
print(x_val.shape)
print(y_train.shape)
print(y_val.shape)
# Shaping data into the correct shape.
x_train = x_train.reshape([-1, 28, 28, 1])
x_val = x_val.reshape([-1, 28, 28, 1])
y_train = to_categorical(y_train, nb_classes = 10)
y_val = to_categorical(y_val, nb_classes = 10)
return x_train, x_val, y_train, y_val
def get_data_MNIST_test():
# Loading the test data
file_name_test = 'test.csv'
folder = 'Digit Recognizer'
source = folder + '/' + file_name_test
data = pd.read_csv(source)
test_input = data.loc[:, :]
return test_input.as_matrix()
# Oxford Flowers Dataset
def get_data_oxford_flowers():
import tflearn.datasets.oxflower17 as oxflower17
X, Y = oxflower17.load_data(one_hot = True, resize_pics = (227, 227))
split_percentage = 80
split_index = int(X.shape[0]/(100/split_percentage))
x_train = np.array(X[:split_index])
x_val = np.array(X[split_index:])
y_train = np.array(Y[:split_index])
y_val = np.array(Y[split_index:])
return x_train, x_val, y_train, y_val
def get_data_CIFAR10(dataset = 'Train + Val'):
from tflearn.datasets import cifar10
(X, Y), (X_test, Y_test) = cifar10.load_data()
# Size is 32, 32, 3
split_percentage = 90
split_index = int(X.shape[0]/(100/split_percentage))
x_train = np.array(X[:split_index])
x_val = np.array(X[split_index:])
y_train = np.array(Y[:split_index])
y_val = np.array(Y[split_index:])
Y = to_categorical(Y, 10)
Y_test = to_categorical(Y_test, 10)
    if dataset != 'Test':
return x_train, x_val, y_train, y_val
else:
return X_test, Y_test
| apache-2.0 |
fmaschler/networkit | scripts/DynamicBetweennessExperiments_fixed_batch.py | 3 | 4514 | from networkit import *
from networkit.dynamic import *
from networkit.centrality import *
import pandas as pd
import random
def isConnected(G):
cc = properties.ConnectedComponents(G)
cc.run()
return (cc.numberOfComponents() == 1)
def removeAndAddEdges(G, nEdges, tabu=None):
if nEdges > G.numberOfEdges() - tabu.numberOfEdges():
raise Error("G does not have enough edges")
# select random edges for removal
removed = set()
while len(removed) < nEdges:
(u, v) = G.randomEdge()
if not tabu.hasEdge(u, v) and not ((u,v) in removed or (v,u) in removed): # exclude all edges in the tabu graph
removed.add((u, v))
print (removed)
# build event streams
removeStream = []
for (u, v) in removed:
removeStream.append(GraphEvent(GraphEvent.EDGE_REMOVAL, u, v, 0))
addStream = []
for (u, v) in removed:
addStream.append(GraphEvent(GraphEvent.EDGE_ADDITION, u, v, G.weight(u, v)))
return (removeStream, addStream)
def setRandomWeights(G, mu, sigma):
"""
Add random weights, normal distribution with mean mu and standard deviation sigma
"""
for (u, v) in G.edges():
w = random.normalvariate(mu, sigma)
G.setWeight(u, v, w)
return G
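# A hedged mini-example of the weighting helper above on a tiny made-up graph;
# mean 1.0 and standard deviation 0.1 mirror the values used in __main__ below.
def exampleSetRandomWeights():
    G = Graph(3, True, False)  # 3 nodes, weighted, undirected
    G.addEdge(0, 1, 1.0)
    G.addEdge(1, 2, 1.0)
    return setRandomWeights(G, 1.0, 0.1)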
def test(G, nEdges, batchSize, epsilon, delta, size):
# find a set of nEdges to remove from G
T = graph.SpanningForest(G).generate()
(removeStream, addStream) = removeAndAddEdges(G, nEdges, tabu=T)
# remove the edges from G
updater = dynamic.GraphUpdater(G)
updater.update(removeStream)
# run the algorithms on the inital graph
print("--- IS G CONNECTED? ")
print(isConnected(G))
bc = Betweenness(G)
print("Running bc")
bc.run()
dynBc = DynBetweenness(G, True)
print("Running dyn bc with predecessors")
dynBc.run()
apprBc = ApproxBetweenness(G, epsilon, delta)
print("Running approx bc")
apprBc.run()
dynApprBc = DynApproxBetweenness(G, epsilon, delta, True)
print("Running dyn approx bc with predecessors")
dynApprBc.run()
# apply the batches
nExperiments = nEdges // batchSize
timesBc = []
timesDynBc = []
timesApprBc = []
timesDynApprBc = []
scoresBc = []
scoresApprBc = []
for i in range(nExperiments):
batch = addStream[i*batchSize : (i+1)*batchSize]
# add the edges of batch to the graph
print("GRAPH SIZE")
print(size)
totalTime = 0.0
for j in range(0, batchSize):
updater.update([batch[j]])
# update the betweenness with the dynamic exact algorithm
if size <= 2**15:
t = stopwatch.Timer()
dynBc.update(batch[j])
totalTime += t.stop()
else:
totalTime = -1
timesDynBc.append(totalTime)
# update the betweenness with the static exact algorithm
t = stopwatch.Timer()
bc.run()
x = t.stop()
timesBc.append(x)
print("Exact BC")
print(x)
print("Speedup Dyn BC (with preds)")
print(x/totalTime)
# update the betweenness with the static approximated algorithm
t = stopwatch.Timer()
apprBc.run()
x = t.stop()
timesApprBc.append(x)
print("ApprBC")
print(x)
# update the betweenness with the dynamic approximated algorithm
t = stopwatch.Timer()
dynApprBc.update(batch)
y = t.stop()
timesDynApprBc.append(y)
print("Speedup DynApprBC (with preds)")
print(x/y)
bcNormalized = [ k/(size*(size-1)) for k in bc.scores()]
scoresBc.append(bcNormalized)
scoresApprBc.append(dynApprBc.scores())
a = pd.Series(timesBc)
b = pd.Series(timesDynBc)
c = pd.Series(timesApprBc)
d = pd.Series(timesDynApprBc)
df1 = pd.DataFrame({"Static exact bc": a, "Dynamic exact bc" : b, "Static approx bc" : c, "Dynamic approx bc" : d})
dic2 = {}
for experiment in range(nExperiments):
a = pd.Series(scoresBc[experiment])
b = pd.Series(scoresApprBc[experiment])
dic2["Exact scores (exp. "+str(experiment)+")"] = a
dic2["Approx scores (exp. "+str(experiment)+")"] = b
df2 = pd.DataFrame(dic2)
return df1, df2
if __name__ == "__main__":
setNumberOfThreads(1)
# setLogLevel("INFO")
batchSize = 128
for i in range(10,21):
size = 2**i
G = generators.DorogovtsevMendesGenerator(size).generate()
G1 = Graph(G.numberOfNodes(), True, False)
for e in G.edges():
G1.addEdge(e[0], e[1], 1.0)
G1 = setRandomWeights(G1, 1, 0.1)
if (isConnected(G1)) :
nEdges = batchSize * 5
epsilon = 0.05
delta = 0.1
(df1, df2) = test(G1, nEdges, batchSize, epsilon, delta, size)
df1.to_csv("results_fixed_batch/times_weighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
df2.to_csv("results_fixed_batch/scores_weighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
else:
print("The generated graph is not connected.")
| mit |
tosolveit/scikit-learn | sklearn/tests/test_dummy.py | 186 | 17778 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
| bsd-3-clause |
bnaul/scikit-learn | examples/cluster/plot_agglomerative_dendrogram.py | 23 | 1732 | # Authors: Mathew Kallada, Andreas Mueller
# License: BSD 3 clause
"""
=========================================
Plot Hierarchical Clustering Dendrogram
=========================================
This example plots the corresponding dendrogram of a hierarchical clustering
using AgglomerativeClustering and the dendrogram method available in scipy.
"""
import numpy as np
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.datasets import load_iris
from sklearn.cluster import AgglomerativeClustering
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples under each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1 # leaf node
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
iris = load_iris()
X = iris.data
# setting distance_threshold=0 ensures we compute the full tree.
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None)
model = model.fit(X)
plt.title('Hierarchical Clustering Dendrogram')
# plot the top three levels of the dendrogram
plot_dendrogram(model, truncate_mode='level', p=3)
plt.xlabel("Number of points in node (or index of point if no parenthesis).")
plt.show()
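# Optional cross-check (not part of the original example): scipy's linkage()
# can compute a comparable linkage matrix directly from the data when the same
# 'ward' criterion (AgglomerativeClustering's default) is used, and its shape
# matches the matrix assembled in plot_dendrogram() above.
from scipy.cluster.hierarchy import linkage
Z = linkage(X, method='ward')
assert Z.shape == (X.shape[0] - 1, 4)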
| bsd-3-clause |
arjunkhode/ASP | lectures/06-Harmonic-model/plots-code/spectral-peaks.py | 22 | 1161 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512*2
M = 511
t = -60
w = np.hamming(M)
start = int(.8*fs)  # integer sample index, needed for array slicing below
hN = N/2
hM = (M+1)/2
x1 = x[start:start+M]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs*np.arange(mX.size)/float(N)
plt.figure(1, figsize=(9, 6))
plt.subplot (2,1,1)
plt.plot(freqaxis, mX,'r', lw=1.5)
plt.axis([0,7000,-80,max(mX)+1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + peaks (oboe-A4.wav)')
plt.subplot (2,1,2)
plt.plot(freqaxis, pX,'c', lw=1.5)
plt.axis([0,7000, min(pX),10])
plt.plot(fs * iploc/N, ipphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + peaks')
plt.tight_layout()
plt.savefig('spectral-peaks.png')
plt.show()
| agpl-3.0 |
MikeDMorgan/proj036 | src/PipelineProject036.py | 1 | 141053 | '''
PipelineProject036.py
=====================
- Classes and functions used in pipeline_project036_timeseries.py
==================================================================
'''
################
# import modules
################
import sys
import glob
import gzip
import os
import itertools
import re
import math
import types
import collections
import time
import optparse, shutil
import sqlite3
import random
import tempfile
import numpy as np
import pandas as pd
from pandas.io import sql
import rpy2.rinterface as rinterface
from rpy2.robjects import pandas2ri
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri as numpy2ri
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.GTF as GTF
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelineTimeseries as TS
import mygene
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import subprocess
import shlex
###########
# functions
###########
@P.cluster_runnable
def candidateCeRNAs(counts_file,
shared_file,
lncs_gtf,
gene_gtf,
annotations,
threshold,
expr_file,
outfile):
'''
    Select candidate ceRNAs: highly correlated lncRNA:gene pairs that are
    MRE-dense and share miRNAs.
Output heatmaps: expression profile of top lncRNAs and genes and
normalised cross-correlation matrix
'''
counts_df = pd.read_table(counts_file,
sep="\t", header=None,
index_col=0)
counts_df.columns = ['MRE_counts']
counts_df.index.name = 'gene_id'
shared_df = pd.read_table(shared_file,
sep="\t", header=0,
index_col=0)
# only used when selecting a proportion of candidate ceRNAs
# N = len(shared_df)
if annotations.endswith("gz"):
ann_comp = "gzip"
else:
ann_comp = None
annotate_df = pd.read_table(annotations, sep="\t",
compression=ann_comp, index_col=0,
header=0)
# take highly correlated annotated gene: lncRNA pair, r>=threshold
high_cor = annotate_df[annotate_df['value'] >= threshold]
high_cor['gene_id'] = high_cor.index
# select all miRNA sharing lncRNA:gene pairs
top_shared = shared_df.sort(columns='total_shared',
ascending=False)
top_shared = top_shared[top_shared['total_shared'] > 0]
genes = set(shared_df.index)
lncs = set(shared_df['lncRNA_id'])
# calculate gene and lncRNA lengths for MRE density
gene_file = IOTools.openFile(gene_gtf, "rb")
lnc_file = IOTools.openFile(lncs_gtf, "rb")
gene_it = GTF.transcript_iterator(GTF.iterator(gene_file))
lnc_it = GTF.transcript_iterator(GTF.iterator(lnc_file))
len_dict = {}
for git in gene_it:
for trans in git:
gid = trans.gene_id
if gid in genes:
try:
len_dict[gid] += trans.end - trans.start
except KeyError:
len_dict[gid] = trans.end - trans.start
else:
pass
for lit in lnc_it:
for tran in lit:
lid = tran.gene_id
if lid in lncs:
try:
len_dict[lid] += tran.end - tran.start
except KeyError:
len_dict[lid] = tran.end - tran.start
else:
pass
counts_df['length'] = pd.Series(len_dict, dtype=np.float64)
counts_df['density'] = (counts_df['MRE_counts']/counts_df['length']) * 1000
lnc_counts = counts_df.loc[lncs]
# n = len(lnc_counts)
top_density = lnc_counts.sort(columns='density',
ascending=False)
dense_lncs = set(top_density.index)
shared_lncs = set(top_shared['lncRNA_id'])
shared_genes = set(top_shared.index)
shared_genes = [sg for sg in shared_genes]
# select intersection and get annotations
inter = dense_lncs.intersection(shared_lncs)
# if none of the intersecting genes/lncRNAs are in the highly correlated
# set then break out and give appropriate message.
try:
high_lnc = high_cor.loc[shared_genes]
except KeyError:
E.warn("None of the intersecting genes found in the highly "
"correlated geneset. Ending here")
P.touch(outfile)
return 0
# remove an NA's
high_lnc = high_lnc.loc[np.isfinite(high_lnc['value'])]
high_lnc.index = high_lnc['lncRNA_id']
if not len(inter):
P.touch(outfile)
return 0
else:
pass
# check if highly correlated lncs, MRE dense and those sharing miRNAs
# intersect - break out if they don't
if len(inter.intersection(high_lnc.index)):
top_df = high_lnc.loc[inter]
top_df = top_df.loc[np.isfinite(top_df['value'])]
top_df.index = [x for x, y in enumerate(top_df.index)]
else:
E.warn("Highly correlated lncRNA set does not intersect those "
"sharing miRNAs or are MRE dense. Ending here")
P.touch(outfile)
return 0
candidates = set(top_df['lncRNA_id'])
# get expression from condition-vst data
if expr_file.endswith("gz"):
expr_comp = "gzip"
else:
expr_comp = None
expr_df = pd.read_table(expr_file, sep="\t",
header=0, index_col=0,
compression=expr_comp)
cand_expr = expr_df.loc[candidates]
cand_merge = pd.merge(left=top_df, right=cand_expr,
left_on="lncRNA_id", right_index=True,
how='inner')
# get gene symbols from MyGene.info
mg = mygene.MyGeneInfo()
try:
mg_out = mg.querymany(cand_merge['gene_id'].tolist(),
scopes="ensemblgene",
fields="symbol",
species="mouse", returnall=True)['out']
except AssertionError:
mg_out = mg.querymany(cand_merge['gene_id'].tolist(),
scopes="ensemblgene",
fields="symbol",
species="mouse", returnall=True)['out']
mg_df = pd.DataFrame(mg_out)
symbol_df = pd.merge(left=cand_merge, right=mg_df,
left_on='gene_id', right_on='query')
try:
symbol_df.drop(['notfound', '_id', 'query'], inplace=True,
axis=1)
except ValueError:
try:
symbol_df.drop(['notfound', 'query'], inplace=True,
axis=1)
except ValueError:
try:
symbol_df.drop(['_id', 'query'], inplace=True,
axis=1)
except ValueError:
symbol_df.drop(['query'], inplace=True, axis=1)
# replace ensembl ids with gene symbols for correlated genes
E.info("matching ensembl gene ids to gene symbols")
symbols = symbol_df[['symbol', 'gene_id']]
symbols.index = symbols['gene_id']
symbols.drop_duplicates(subset='gene_id', take_last=True, inplace=True)
cand_df = pd.merge(left=cand_merge, right=symbols,
left_on="gene_id", right_on="gene_id",
how='inner')
# remove duplicate entries
next_df = cand_df.drop_duplicates(subset=['lncRNA_id', 'gene_id'],
take_last=True)
# select ceRNAs with highest expression, i.e. max(VST expression) >= 8.0
# only use 0-72 hours
E.info("filter lncRNAs on expression level")
expr48_df = expr_df.iloc[:, 1:8]
    # try 48 hour expression >= 8
high_expr = expr48_df.apply(max, axis=1) > 8
next_df.index = next_df['lncRNA_id']
out_df = next_df.loc[high_expr]
idxs = [iqx for iqx, iqy in enumerate(out_df.index)]
out_df.index = idxs
# check not all lncRNAs have been filtered out
if len(out_df):
out_df.to_csv(outfile, sep="\t", index_label="idx")
else:
E.warn("These are not the lncRNAs you are looking for."
"No lncRNAs left in list, relax upstream parameters"
" to allow for downstream filtering")
P.touch(outfile)
if len(set(out_df['lncRNA_id'])) < 2:
return 0
else:
pass
# generate normalised cross-correlation matrix of candidate ceRNAs
E.info("Computing normalised cross-correlation matrix")
lnc_ids = out_df['lncRNA_id'].values
n_ids = set([lx for lx in lnc_ids])
cor_df = pd.DataFrame(index=n_ids, columns=n_ids)
cor_df.fillna(0.0, inplace=True)
E.info("Calculating cross-correlations "
"%i calculations" % (len(n_ids)*len(n_ids)))
for pair in itertools.product(n_ids, n_ids):
v1 = expr_df.loc[pair[0]].tolist()
v2 = expr_df.loc[pair[1]].tolist()
corr = TS.crossCorrelate(v1, v2, lag=0)[0]
cor_df.loc[pair[0]][pair[1]] = corr
E.info("Matching ceRNAs to gene symbols")
new_sym = []
for k in cor_df.index:
if re.search("LNC", k):
new_sym.append(k)
else:
vals = symbol_df['symbol'][symbol_df['gene_id'] == k].values[0]
new_sym.append(vals)
cor_df.index = new_sym
cor_df.columns = new_sym
# remove duplicates
cand_merge['dups'] = cand_merge.index
cand_merge.drop_duplicates(subset='dups', take_last=True,
inplace=True)
cand_merge.drop(['dups'], axis=1, inplace=True)
cand_lncs = cand_merge.index
r_cor = pandas2ri.py2ri_pandasdataframe(cor_df)
r_expr = pandas2ri.py2ri_pandasdataframe(cand_merge.iloc[:, 3:])
r_lncs = ro.StrVector([r for r in cand_lncs])
R.assign("cor.df", r_cor)
R.assign("expr.df", r_expr)
R.assign("r.lncs", r_lncs)
cond = outfile.split("/")[-1].split("-")[0]
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
R('''colnames(expr.df) <- c(0, 1, 3, 6, 12, 24, 48, 72, 96, 120)''')
R('''rownames(expr.df) <- r.lncs''')
R('''hmcol <- colorRampPalette(brewer.pal(9, "BuPu"))(100)''')
R('''cor_col <- colorRampPalette(brewer.pal(9, "PuOr"))(100)''')
E.info("Generating heatmap of ceRNA expression")
# heatmap of expression
R('''png("images.dir/%s-candidates-heatmap.png", '''
'''height=1600, width=1600)''' % cond)
R('''heatmap.2(as.matrix(expr.df), trace="none", col=hmcol, '''
'''density.info="none", Colv=colnames(expr.df), margins=c(6,12), '''
'''dendrogram="none", cexRow=2, cexCol=2)''')
R('''dev.off()''')
E.info("Generating heatmap of cross-correlations")
# heatmap of correlations
R('''png("images.dir/%s-candidates-correlation_heatmap.png",'''
'''height=1600, width=1600)''' % cond)
R('''heatmap.2(as.matrix(cor.df), trace="none", col=cor_col, '''
'''density.info="none", dendrogram="none", margins=c(10, 10), '''
'''cexRow=2, cexCol=2)''')
R('''dev.off()''')
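# Illustrative sketch (not called by the pipeline): the "normalised
# cross-correlation" used above is TS.crossCorrelate() at lag 0, which for
# z-scored expression profiles should correspond to a Pearson-type
# correlation. The helper below is a numpy-only approximation of that idea,
# kept here purely as documentation.
def _example_lag_zero_correlation(v1, v2):
    v1 = np.asarray(v1, dtype=np.float64)
    v2 = np.asarray(v2, dtype=np.float64)
    z1 = (v1 - v1.mean()) / v1.std()
    z2 = (v2 - v2.mean()) / v2.std()
    return np.mean(z1 * z2)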
@P.cluster_runnable
def annotateCeRNAs(ceRNA_file,
lnc_gtf,
mre_file,
outfile):
'''
Annotate putative ceRNAs from reference gtf file,
including which MREs are shared.
'''
# merge input ceRNAs with shared MRE information
if mre_file.endswith("gz"):
mre_comp = "gzip"
else:
mre_comp = None
if ceRNA_file.endswith("gz"):
cerna_comp = "gzip"
else:
cerna_comp = None
# handle bug when all ceRNAs are filtered out
if IOTools.isEmpty(ceRNA_file):
E.warn("No ceRNAs to parse, exiting")
P.touch(outfile)
return 0
else:
pass
mre_df = pd.read_table(mre_file, sep="\t", header=0,
index_col=None, compression=mre_comp)
cerna_df = pd.read_table(ceRNA_file, sep="\t", header=0,
index_col=0, compression=cerna_comp)
matched = pd.merge(left=cerna_df, right=mre_df,
left_on=["gene_id", "lncRNA_id", "symbol"],
right_on=["gene_id", "lncRNA_id", "symbol"],
how='inner')
matched = matched.drop_duplicates(subset=['gene_id', 'lncRNA_id', 'value'],
take_last=True)
# drop ceRNA pairs that do not share any miRNAs
shared_df = matched[matched['total_shared'] != 0]
lnc_open = IOTools.openFile(lnc_gtf, "rb")
lnc_it = GTF.transcript_iterator(GTF.iterator(lnc_open))
lncs = set(shared_df['lncRNA_id'])
# get genome coordinates of ceRNAs
lnc_dict = {}
for lit in lnc_it:
for lnc in lit:
if lnc.gene_id in lncs:
try:
lnc_dict[lnc.gene_id]['contig'] = lnc.contig
lnc_dict[lnc.gene_id]['start'].append(lnc.start)
lnc_dict[lnc.gene_id]['end'].append(lnc.end)
lnc_dict[lnc.gene_id]['strand'].add(lnc.strand)
except KeyError:
                    lnc_dict[lnc.gene_id] = {'contig': lnc.contig,
'start': [lnc.start],
'end': [lnc.end],
'strand': set(lnc.strand)}
else:
pass
shared_df.index = shared_df['lncRNA_id']
lcontig_dict = {}
lstart_dict = {}
lend_dict = {}
lstrand_dict = {}
for l_each in lnc_dict.keys():
lcontig_dict[l_each] = lnc_dict[l_each]['contig']
lstart_dict[l_each] = min(lnc_dict[l_each]['start'])
lend_dict[l_each] = max(lnc_dict[l_each]['end'])
lstrand_dict[l_each] = [ls for ls in lnc_dict[l_each]['strand']][-1]
shared_df.loc[:, 'lncRNA_contig'] = pd.Series(lcontig_dict,
index=shared_df.index)
shared_df.loc[:, 'lncRNA_start'] = pd.Series(lstart_dict,
index=shared_df.index)
shared_df.loc[:, 'lncRNA_end'] = pd.Series(lend_dict,
index=shared_df.index)
shared_df.loc[:, 'lncRNA_strand'] = pd.Series(lstrand_dict,
index=shared_df.index)
shared_df.sort(['gene_id', 'lncRNA_contig'],
ascending=True, inplace=True)
shared_df.index = [x for x, y in enumerate(shared_df.index)]
try:
shared_df.drop(['_id'], inplace=True, axis=1)
except ValueError:
pass
shared_df.to_csv(outfile, sep="\t", index_label="indx")
def netExpressionFiltering(expression,
direction):
'''
    Calculate the net expression change over a time course (first 48-72
    hours) as the mean fold change between consecutive time points.
    For direction "up" a profile passes if this mean is >= 1, for "down"
    if it is <= 1. Alternative: check the sum of fold changes > 1.
'''
timepoints = expression.index
change_list = []
for t in range(0, len(timepoints)):
if t == 0:
# t0 = expression.loc[timepoints[t]]
pass
else:
t1 = expression.loc[timepoints[t-1]]
t2 = expression.loc[timepoints[t]]
fchange = t2/t1
change_list.append(fchange)
net_change = np.mean(change_list)
if direction == "up":
if net_change >= 1:
return 1
else:
return 0
elif direction == "down":
if net_change <= 1:
return 1
else:
return 0
else:
raise ValueError("unknown direction of expression change"
" on which to filter")
@P.cluster_runnable
def filterAnnotatedCeRNAs(cerna_file,
expression_file,
direction,
outfile):
'''
Filter annotated ceRNAs based on direction of expression,
i.e. up-regulated or down-regulated
'''
# handle bug when all ceRNAs are filtered out
if IOTools.isEmpty(cerna_file):
E.warn("No ceRNAs to parse, exiting")
P.touch(outfile)
return 0
else:
pass
if expression_file.endswith("gz"):
expr_comp = "gzip"
else:
expr_comp = None
expr_df = pd.read_table(expression_file, sep="\t", header=0,
index_col=0, compression=expr_comp)
cerna_df = pd.read_table(cerna_file, sep="\t", header=0,
index_col=0)
cerna_df.index = cerna_df['lncRNA_id']
filter_dict = {}
expr_df = expr_df.iloc[:, 1:7]
for lnc in cerna_df.index:
expr = expr_df.loc[lnc]
if netExpressionFiltering(expr, direction):
filter_dict[lnc] = cerna_df.loc[lnc]
else:
pass
# may return a dictionary of dataframes, append all together
lnc_keys = filter_dict.keys()
if len(lnc_keys):
df0 = filter_dict[lnc_keys[0]]
lnc_keys.pop(0)
for lncdf in lnc_keys:
df1 = filter_dict[lncdf]
df0 = df0.append(df1)
filter_df = df0
filter_df.index = [x for x, y in enumerate(filter_df.index)]
filter_df.to_csv(outfile, sep="\t", index_label="indx")
else:
E.warn("No ceRNAs to filter on expression direction.")
P.touch(outfile)
@P.cluster_runnable
def annotateGeneExpr(cernas, expression, outfile):
'''
    Annotate ceRNAs with expression data for partner protein-coding genes
'''
if cernas.endswith("gz"):
cerna_comp = "gzip"
else:
cerna_comp = None
if expression.endswith("gz"):
expr_comp = "gzip"
else:
expr_comp = None
try:
cerna_df = pd.read_table(cernas, sep="\t", header=0, index_col=0,
compression=cerna_comp)
except ValueError:
E.warn("no ceRNA candidates to parse. Exiting")
P.touch(outfile)
return 0
expr_df = pd.read_table(expression, sep="\t", header=0, index_col=0,
compression=expr_comp)
genes = cerna_df['gene_id'].tolist()
gene_expr = expr_df.loc[genes]
cerna_lncs = cerna_df[['lncRNA_id', 'value',
'gene_id', 'symbol', 'shared_miRNAs']]
merge_df = pd.merge(left=cerna_lncs, right=gene_expr, left_on='gene_id',
right_index=True, how='inner')
merge_df.drop_duplicates(subset=['gene_id', 'lncRNA_id', 'symbol'],
take_last=True, inplace=True)
merge_df.to_csv(outfile, index_label="indx", sep="\t")
@P.cluster_runnable
def mergeStatTable(file_list, outfile):
'''
merge statistical testing tables
'''
df = pd.read_table(file_list[0], sep="\t", header=0, index_col=0)
file_list.pop(0)
for fle in file_list:
df_ = pd.read_table(fle, sep="\t", header=0, index_col=0)
# remove duplicated rows
df.drop_duplicates(inplace=True)
df = df.append(df_)
df.to_csv(outfile, sep="\t", index_label="comparison")
@P.cluster_runnable
def annotateGeneList(infile,
lnc_gtf,
outfile):
'''
Annotate a list of correlated genes and lncRNAs with gene
symbols and lncRNA genome co-ordinates
'''
if infile.endswith("gz"):
comp = "gzip"
else:
comp = None
cor_df = pd.read_table(infile, sep="\t", header=0,
index_col=None, compression=comp)
# need to figure out which column contains the gene ids
gene_re = re.compile("ENS")
lnc_re = re.compile("LNC")
# which ever column has majority ensembl IDs is the gene ID column
# there are only 2 columns to choose from gene_id and lncRNA_id
lnc_col_search = sum([1 for lx in cor_df['lncRNA_id'] if re.search(gene_re, lx)])
gene_col_search = sum([1 for gx in cor_df['gene_id'] if re.search(gene_re, gx)])
if gene_col_search > lnc_col_search:
gene_col = 'gene_id'
lnc_col = 'lncRNA_id'
elif gene_col_search < lnc_col_search:
gene_col = 'lncRNA_id'
lnc_col = 'gene_id'
else:
raise ValueError("Unable to determine gene ID column")
gene_set = cor_df[gene_col].tolist()
genes = [g for g in set(gene_set)]
mg = mygene.MyGeneInfo()
# can throw AssertionError if too many queries at once
# a single retry usually works
try:
mg_out = mg.querymany(genes, scopes="ensemblgene", fields="symbol",
species="mouse", returnall=True)['out']
except AssertionError:
mg_out = mg.querymany(genes, scopes="ensemblgene", fields="symbol",
species="mouse", returnall=True)['out']
mg_df = pd.DataFrame(mg_out)
mg_df.drop_duplicates(subset="query", take_last=True, inplace=True)
merged = pd.merge(left=cor_df, right=mg_df,
how='left', left_on="gene_id",
right_on='query')
try:
merged.drop(['_id', 'notfound', 'query'], inplace=True, axis=1)
except ValueError:
try:
merged.drop(['_id', 'query'], inplace=True, axis=1)
except ValueError:
merged.drop(['query'], inplace=True, axis=1)
# get lncRNA co-ordinates from file
lnc_dict = {}
lnc_file = IOTools.openFile(lnc_gtf, "rb")
lnc_it = GTF.transcript_iterator(GTF.iterator(lnc_file))
for gene in lnc_it:
start = []
end = []
for trans in gene:
start.append(trans.start)
end.append(trans.end)
strand = trans.strand
contig = trans.contig
gid = trans.gene_id
lnc_class = trans.source
exon_class = trans.asDict()['exon_status']
lnc_dict[gid] = {'contig': contig,
'start': min(start),
'end': max(end),
'strand': strand,
'lnc_class': lnc_class,
'exon_class': exon_class}
lnc_file.close()
lnc_df = pd.DataFrame(lnc_dict).T
try:
annotated = pd.merge(left=merged, right=lnc_df,
left_on='lncRNA', right_index=True,
how='left')
except KeyError:
annotated = pd.merge(left=merged, right=lnc_df,
left_on=lnc_col, right_index=True,
how='left')
# drop cluster info if contained in dataframe
try:
annotated.drop(['gene_cluster', 'lncRNA_cluster'],
inplace=True, axis=1)
except ValueError:
pass
columns = ['lncRNA_id', 'ensembl_id', 'correlation',
'gene_symbol', 'chromosome', 'lnc_end',
'lnc_exonic', 'lnc_class', 'lnc_start',
'lnc_strand']
annotated.columns = columns
sort_cols = ['ensembl_id', 'gene_symbol',
'correlation', 'lncRNA_id',
'chromosome', 'lnc_start', 'lnc_end', 'lnc_strand',
'lnc_class', 'lnc_exonic']
annotated = annotated[sort_cols]
annotated.to_csv(outfile, sep="\t",
index_label="idx")
@P.cluster_runnable
def lncsPerGene(infile, outfile):
'''
count the number of lncRNAs correlated with each protein-coding gene.
'''
if infile.endswith("gz"):
comp = "gzip"
else:
comp = None
df = pd.read_table(infile, sep="\t", header=0,
index_col=0, compression=comp)
genes = set(df['ensembl_id'])
gdict = {}
for gene in genes:
gdf = df[df['ensembl_id'] == gene]
cor_lncs = set(gdf['lncRNA_id'])
gdict[gene] = len(cor_lncs)
gser = pd.Series(gdict)
gser.columns = ['counts']
gser.to_csv(outfile, sep="\t", index_label="ensembl_id")
@P.cluster_runnable
def genesPerLnc(infile, outfile):
'''
    count the number of genes correlated with each lncRNA,
subset by lncRNA class
'''
if infile.endswith("gz"):
comp = "gzip"
else:
comp = None
df = pd.read_table(infile, sep="\t", header=0,
index_col=0, compression=comp)
lncs = set(df['lncRNA_id'])
ldict = {}
for lnc in lncs:
ldf = df[df['lncRNA_id'] == lnc]
lnc_class = [str(c) for c in set(ldf['lnc_class'])][0]
cor_genes = set(ldf['ensembl_id'])
ldict[lnc] = {'n_genes': len(cor_genes),
'lnc_class': lnc_class}
sum_df = pd.DataFrame(ldict).T
sum_df.to_csv(outfile, sep="\t", index_label="lncRNA_id")
@P.cluster_runnable
def plotGeneCounts(infile, outfile):
'''
Plot counts of lncRNAs per gene
'''
pandas2ri.activate()
df = pd.read_table(infile, sep="\t", header=None, index_col=None)
df.columns = ["gene", "counts"]
r_df = pandas2ri.py2ri_pandasdataframe(df)
R.assign("g.df", r_df)
R('''library(ggplot2)''')
R('''g.df$counts <- as.numeric(as.character(g.df$counts))''')
R('''p_gene <- ggplot(g.df, aes(x=counts)) + '''
'''geom_histogram(colour="black", fill="coral", binwidth=5) + '''
'''labs(x="N correlated lncRNAs per gene")''')
R('''png("%s", height=480, width=480)''' % outfile)
R('''print(p_gene)''')
R('''dev.off()''')
@P.cluster_runnable
def plotLncCounts(infile, outfile):
'''
    Plot ggplot density curves of the number of genes correlated with
    each lncRNA, faceted by lncRNA class
'''
pandas2ri.activate()
df = pd.read_table(infile, sep="\t", header=0, index_col=None)
r_df = pandas2ri.py2ri_pandasdataframe(df)
R.assign("r.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''r.df$lnc_class <- as.factor(r.df$lnc_class)''')
R('''p_lnc <- ggplot(r.df, aes(x=n_genes, colour=lnc_class, '''
'''fill=lnc_class)) + geom_density(alpha=0.5) + '''
'''facet_grid(lnc_class ~ .) + '''
'''labs(x="N correlated genes per lncRNA")''')
R('''png('%s', height=480, width=480)''' % outfile)
R('''print(p_lnc)''')
R('''dev.off()''')
@P.cluster_runnable
def plotCorrelations(cor_file, rand_file, prox_file, anti_file, outfile):
'''
Plot distributions of correlation coefficients across gene sets
'''
cor_df = pd.read_table(cor_file, sep="\t", index_col=None,
header=0)
cor_df.columns = ['gene_id', 'correlation', 'lncRNA_id']
prox_df = pd.read_table(prox_file, sep="\t", index_col=None,
header=0)
rand_df = pd.read_table(rand_file, sep="\t", index_col=None,
header=0)
anti_df = pd.read_table(anti_file, sep="\t", index_col=None,
header=0)
cor_df['cat'] = "correlated"
prox_df['cat'] = "proximal"
rand_df['cat'] = "random"
anti_df['cat'] = "anticorrelated"
all_df = cor_df.append(prox_df)
all_df = all_df.append(rand_df)
all_df = all_df.append(anti_df)
all_idx = [i for i, t in enumerate(all_df.index)]
all_df.index = all_idx
r_all = pandas2ri.py2ri_pandasdataframe(all_df)
R.assign("r.df", r_all)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''r.df$cat <- as.factor(r.df$cat)''')
R('''p_cor <- ggplot(r.df, aes(x=correlation, fill=cat, colour=cat)) + '''
'''geom_density(alpha=0.2) + labs(x="Normalised cross-correlation") + '''
'''theme_bw() + theme(text=element_text(size=18, colour="black")) + '''
'''guides(colour=guide_legend(title="lncRNA set"), '''
'''fill=guide_legend(title="lncRNA set"))''')
R('''png("%s", height=480, width=480)''' % outfile)
R('''print(p_cor)''')
R('''dev.off()''')
@P.cluster_runnable
def plotSigMREs(cor, prox, random, outfile):
'''
Density plot of correlated, proximal and random lncRNA:gene
    pairs' proportion of statistically significantly conserved MREs
'''
if cor.endswith("gz"):
cor_comp = "gzip"
else:
cor_comp = None
if prox.endswith("gz"):
prox_comp = "gzip"
else:
prox_comp = None
if random.endswith("gz"):
rand_comp = "gzip"
else:
rand_comp = None
cor_df = pd.read_table(cor, sep="\t", index_col=None,
header=0, compression=cor_comp,
comment='#')
prox_df = pd.read_table(prox, sep="\t", index_col=None,
header=0, compression=prox_comp,
comment='#')
rand_df = pd.read_table(random, sep="\t", index_col=None,
header=0, compression=rand_comp,
comment='#')
cor_idx = cor_df.index
lnc_tag = [x for x in cor_idx if re.search("LNC", cor_df.loc[x]['target'])]
lnc_cor_df = cor_df.loc[lnc_tag]
g_tag = [g for g in cor_idx if re.search("ENS", cor_df.loc[g]['target'])]
gene_cor_df = cor_df.loc[g_tag]
pro_idx = prox_df.index
pro_ln = [p for p in pro_idx if re.search("LNC", prox_df.loc[p]['target'])]
lnc_prox_df = prox_df.loc[pro_ln]
pro_g = [w for w in pro_idx if re.search("ENS", prox_df.loc[w]['target'])]
gene_prox_df = prox_df.loc[pro_g]
ran_idx = rand_df.index
ran_ln = [r for r in ran_idx if re.search("LNC", rand_df.loc[r]['target'])]
lnc_rand_df = rand_df.loc[ran_ln]
ran_g = [d for d in ran_idx if re.search("ENS", rand_df.loc[d]['target'])]
gene_rand_df = rand_df.loc[ran_g]
gprox_df = summMreCons(gene_prox_df)
lprox_df = summMreCons(lnc_prox_df)
grand_df = summMreCons(gene_rand_df)
lrand_df = summMreCons(lnc_rand_df)
gcor_df = summMreCons(gene_cor_df)
lcor_df = summMreCons(lnc_cor_df)
gprox_df['biotype'] = "gene"
lprox_df['biotype'] = "lncRNA"
grand_df['biotype'] = "gene"
lrand_df['biotype'] = "lncRNA"
gcor_df['biotype'] = "gene"
lcor_df['biotype'] = "lncRNA"
cor_sum = gcor_df.append(lcor_df)
cor_sum['cat'] = "correlated"
lcor_df['cat'] = "correlated"
gcor_df['cat'] = "correlated"
prox_sum = gprox_df.append(lprox_df)
prox_sum['cat'] = "proximal"
gprox_df['cat'] = "proximal"
lprox_df['cat'] = "proximal"
rand_sum = grand_df.append(lrand_df)
rand_sum['cat'] = "random"
grand_df['cat'] = "random"
lrand_df['cat'] = "random"
all_sum = cor_sum.append(prox_sum)
all_sum = all_sum.append(rand_sum)
all_idx = [ix for ix, y in enumerate(all_sum.index)]
all_sum.index = all_idx
r_sum = pandas2ri.py2ri_pandasdataframe(all_sum)
R.assign("r.sum", r_sum)
all_lncs = lcor_df.append(lprox_df)
all_lncs = all_lncs.append(lrand_df)
l_idx = [lx for lx, p in enumerate(all_lncs.index)]
all_lncs.index = l_idx
r_lncs = pandas2ri.py2ri_pandasdataframe(all_lncs)
R.assign("r.lncs", r_lncs)
all_genes = gcor_df.append(gprox_df)
all_genes = all_genes.append(grand_df)
g_idx = [gx for gx, b in enumerate(all_genes.index)]
all_genes.index = g_idx
# formally test differences between gene sets
wilcoxpy = R['wilcox.test']
test_dict = {}
for combs in itertools.combinations(set(all_genes['cat']), r=2):
pos1 = [x for x in combs if re.search("correlated", x)]
pos2 = [q for q in combs if re.search("random", q)]
if not len(pos1):
pos1 = [p for p in combs if re.search("proximal", p)]
elif not pos2:
pos2 = [j for j in combs if re.search("proximal", j)]
vec1 = all_genes['prop_sig'][all_genes['cat'] == pos1[0]]
r_vec1 = ro.FloatVector([r for r in vec1])
vec2 = all_genes['prop_sig'][all_genes['cat'] == pos2[0]]
r_vec2 = ro.FloatVector([g for g in vec2])
res = wilcoxpy(r_vec1, r_vec2, alternative="greater")
pval = res.rx('p.value')[0][0]
stat = res.rx('statistic')[0][0]
test_dict[(pos1[0], pos2[0])] = {"W": stat,
"p-value": pval}
test_table = pd.DataFrame(test_dict).T
cond = cor.split("/")[-1].split("-")[0]
test_table['condition'] = cond
test_table.to_csv("stats.dir/%s-sig_conserved-stats.tsv" % cond,
sep="\t", index_label="reference")
r_genes = pandas2ri.py2ri_pandasdataframe(all_genes)
R.assign("r.genes", r_genes)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''r.lncs$cat <- as.factor(r.lncs$cat)''')
R('''p_sums <- ggplot(r.lncs, aes(x=prop_sig, fill=cat, colour=cat)) + '''
'''geom_density(alpha=0.2) + '''
'''labs(x="Proportion of significantly conserved MREs", y="density", '''
'''title="Proportion of statistically significantly conserved\n'''
'''MREs over lncRNAs")''')
R('''png('%s', height=480, width=480)''' % outfile)
R('''print(p_sums)''')
R('''dev.off()''')
def summMreCons(dataframe):
'''
summarise over a dataframe of MRE conservation scores and
p-values
'''
df_dict = {}
for entry in set(dataframe['target']):
edict = {}
e_df = dataframe[dataframe['target'] == entry]
mean = np.mean(e_df['score'])
n_mres = len(e_df['score'])
n_sig = len([s for s in e_df['pCons'] if s < 0.01])
try:
prop_sig = (n_sig/float(n_mres)) * 100
except ZeroDivisionError:
prop_sig = 0.0
edict = {'mean_phastCons': mean,
'n_MREs': n_mres,
'n_sigP': n_sig,
'prop_sig': prop_sig}
df_dict[entry] = edict
out_df = pd.DataFrame(df_dict).T
return out_df
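# Hedged sketch of the input summMreCons() expects: one row per MRE with a
# 'target' identifier, a conservation 'score' and a conservation p-value
# 'pCons'. The values below are invented purely to illustrate the summary
# (mean score, number of MREs, number and proportion with pCons < 0.01).
def _example_summ_mre_cons():
    toy = pd.DataFrame({'target': ["LNC_1", "LNC_1", "LNC_2"],
                        'score': [0.9, 0.4, 0.7],
                        'pCons': [0.001, 0.5, 0.009]})
    return summMreCons(toy)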
@P.cluster_runnable
def annotateMreGTF(lnc_gtf, mre_gtf):
'''
annotated a gtf/gff file of predicted MREs with information
from matched lncRNA/gene gtf
'''
lnc_index = GTF.readAndIndex(GTF.iterator(IOTools.openFile(lnc_gtf)))
ofile = IOTools.openFile(mre_gtf, "rb")
for mre in GTF.iterator(ofile):
lnc_source = lnc_index.get(mre.contig, mre.start, mre.end)
for i in lnc_source:
mre.source = i[2].source
yield mre
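# Usage sketch (assumption: the cluster_runnable decorator passes the call
# straight through when run locally): annotateMreGTF() is a generator, so its
# annotated records have to be consumed and written out explicitly. The file
# names here are placeholders.
def _example_write_annotated_mres(lnc_gtf, mre_gtf, outfile):
    with IOTools.openFile(outfile, "w") as out_handle:
        for mre in annotateMreGTF(lnc_gtf, mre_gtf):
            out_handle.write("%s\n" % str(mre))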
@P.cluster_runnable
def countMREsOverLncs(mre_gtf, lnc_gtf, outfile):
'''
Count the number of non-redundant MREs overlapping
    lncRNA gene models in the input; counts are made over whole genes,
    not just transcripts
'''
indices = {}
gtf_it = GTF.readAndIndex(GTF.iterator(IOTools.openFile(lnc_gtf, "rb")))
indices['gene_id'] = gtf_it
trans_gene_dict = {}
counter_dict = {}
for lnc in GTF.iterator(IOTools.openFile(lnc_gtf)):
counter_dict[lnc.gene_id] = 0
trans_gene_dict[lnc.transcript_id] = lnc.gene_id
with IOTools.openFile(mre_gtf, "rb") as mre_open:
mre_it = GTF.iterator_filtered(GTF.iterator(mre_open),
feature="MRE")
for mre in mre_it:
overlap = mre.asDict()['target']
gene_id = trans_gene_dict[overlap]
counter_dict[gene_id] += 1
with IOTools.openFile(outfile, "w") as ofile:
for x in counter_dict.keys():
ofile.write("%s\t%i\n" % (x,
counter_dict[x]))
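# Sketch of how the counts written above can be reloaded downstream
# (assumption: the two-column, headerless layout written by
# countMREsOverLncs() is kept as-is, which plotMreCounts() below also assumes).
def _example_load_mre_counts(counts_file):
    counts = pd.read_table(counts_file, sep="\t", header=None, index_col=0)
    counts.columns = ['MRE_counts']
    counts.index.name = 'gene_id'
    return counts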
@P.cluster_runnable
def plotMreDensity(cor_file,
prox_file,
rand_file,
anti_file,
ref_gtf,
lnc_gtf,
outfile):
'''
    Plot MRE density as the number of MREs per kb over lncRNAs
'''
if cor_file.endswith("gz"):
cor_comp = "gzip"
else:
cor_comp = None
if prox_file.endswith("gz"):
prox_comp = "gzip"
else:
prox_comp = None
if rand_file.endswith("gz"):
rand_comp = "gzip"
else:
rand_comp = None
if anti_file.endswith("gz"):
anti_comp = "gzip"
else:
anti_comp = None
cor_df = pd.read_table(cor_file, sep="\t", index_col=0,
header=0, compression=cor_comp)
cor_df.columns = ['MRE_counts']
cor_df.index.name = 'gene_id'
cor_index = cor_df.index.tolist()
cor_lncs = set([cl for cl in cor_index if re.search("LNC", cl)])
cor_genes = set([cg for cg in cor_index if re.search("ENS", cg)])
prox_df = pd.read_table(prox_file, sep="\t", index_col=0,
header=None, compression=prox_comp)
prox_df.index.name = 'gene_id'
prox_df.columns = ['MRE_counts']
prox_index = prox_df.index.tolist()
prox_lncs = set([pl for pl in prox_index if re.search("LNC", pl)])
prox_genes = set([pg for pg in prox_index if re.search("ENS", pg)])
rand_df = pd.read_table(rand_file, sep="\t", index_col=0,
header=None, compression=rand_comp)
rand_df.index.name = 'gene_id'
rand_df.columns = ['MRE_counts']
rand_index = rand_df.index.tolist()
rand_lncs = set([rl for rl in rand_index if re.search("LNC", rl)])
rand_genes = set([rg for rg in rand_index if re.search("ENS", rg)])
anti_df = pd.read_table(anti_file, sep="\t", index_col=0,
header=0, compression=anti_comp)
anti_df.index.name = 'gene_id'
anti_df.columns = ['MRE_counts']
anti_index = anti_df.index.tolist()
anti_lncs = set([al for al in anti_index if re.search("LNC", al)])
anti_genes = set([ag for ag in anti_index if re.search("ENS", ag)])
cor_len_dict = {}
prox_len_dict = {}
rand_len_dict = {}
anti_len_dict = {}
gene_file = IOTools.openFile(ref_gtf, "rb")
gene_iterator = GTF.transcript_iterator(GTF.iterator(gene_file))
lnc_file = IOTools.openFile(lnc_gtf, "rb")
lnc_iterator = GTF.transcript_iterator(GTF.iterator(lnc_file))
# get all gene and lncRNA lengths
for git in gene_iterator:
for trans in git:
gid = trans.gene_id
if gid in cor_genes and gid in prox_genes and gid not in anti_genes:
try:
cor_len_dict[gid] += (trans.end - trans.start)
prox_len_dict[gid] += (trans.end - trans.start)
except KeyError:
cor_len_dict[gid] = trans.end - trans.start
prox_len_dict[gid] = trans.end - trans.start
elif gid in cor_genes and gid not in prox_genes and gid not in anti_genes:
try:
cor_len_dict[gid] += (trans.end - trans.start)
except KeyError:
cor_len_dict[gid] = trans.end - trans.start
elif gid in rand_genes and gid in prox_genes and gid in anti_genes:
try:
rand_len_dict[gid] += trans.end - trans.start
prox_len_dict[gid] += trans.end - trans.start
anti_len_dict[gid] += trans.end - trans.start
except KeyError:
rand_len_dict[gid] = trans.end - trans.start
prox_len_dict[gid] = trans.end - trans.start
anti_len_dict[gid] = trans.end - trans.start
elif gid in rand_genes and gid in anti_genes and gid not in prox_genes:
try:
rand_len_dict[gid] += trans.end - trans.start
anti_len_dict[gid] += trans.end - trans.start
except KeyError:
rand_len_dict[gid] = trans.end - trans.start
anti_len_dict[gid] = trans.end - trans.start
elif gid in prox_genes and gid in anti_genes and gid not in rand_genes:
try:
prox_len_dict[gid] += trans.end - trans.start
anti_len_dict[gid] += trans.end - trans.start
except KeyError:
prox_len_dict[gid] = trans.end - trans.start
anti_len_dict[gid] = trans.end - trans.start
elif gid in prox_genes and gid in rand_genes and gid not in anti_genes:
try:
prox_len_dict[gid] += trans.end - trans.start
rand_len_dict[gid] += trans.end - trans.start
except KeyError:
prox_len_dict[gid] = trans.end - trans.start
rand_len_dict[gid] = trans.end - trans.start
elif gid in prox_genes and gid not in rand_genes and gid not in anti_genes:
try:
prox_len_dict[gid] += trans.end - trans.start
except KeyError:
prox_len_dict[gid] = trans.end - trans.start
elif gid in rand_genes and gid not in prox_genes and gid not in anti_genes:
try:
rand_len_dict[gid] += trans.end - trans.start
except KeyError:
rand_len_dict[gid] = trans.end - trans.start
elif gid in anti_genes and gid not in prox_genes and gid not in rand_genes:
try:
anti_len_dict[gid] += trans.end - trans.start
except KeyError:
anti_len_dict[gid] = trans.end - trans.start
else:
pass
gene_file.close()
for lit in lnc_iterator:
for tran in lit:
lid = tran.gene_id
if lid in cor_lncs:
try:
cor_len_dict[lid] += tran.end - tran.start
prox_len_dict[lid] += tran.end - tran.start
anti_len_dict[lid] += tran.end - tran.start
rand_len_dict[lid] += tran.end - tran.start
except KeyError:
cor_len_dict[lid] = tran.end - tran.start
prox_len_dict[lid] = tran.end - tran.start
anti_len_dict[lid] = tran.end - tran.start
rand_len_dict[lid] = tran.end - tran.start
elif lid in rand_lncs:
try:
rand_len_dict[lid] += tran.end - tran.start
prox_len_dict[lid] += tran.end - tran.start
anti_len_dict[lid] += tran.end - tran.start
except KeyError:
rand_len_dict[lid] = tran.end - tran.start
prox_len_dict[lid] = tran.end - tran.start
anti_len_dict[lid] = tran.end - tran.start
elif lid in anti_lncs:
try:
prox_len_dict[lid] += tran.end - tran.start
anti_len_dict[lid] += tran.end - tran.start
except KeyError:
prox_len_dict[lid] = tran.end - tran.start
anti_len_dict[lid] = tran.end - tran.start
elif lid in prox_lncs:
try:
prox_len_dict[lid] += tran.end - tran.start
except KeyError:
prox_len_dict[lid] = tran.end - tran.start
else:
pass
lnc_file.close()
cor_df['length'] = pd.Series(cor_len_dict, dtype=np.float64)
prox_df['length'] = pd.Series(prox_len_dict, dtype=np.float64)
rand_df['length'] = pd.Series(rand_len_dict, dtype=np.float64)
anti_df['length'] = pd.Series(anti_len_dict, dtype=np.float64)
cor_df['density'] = cor_df['MRE_counts']/(cor_df['length']/1000.0)
prox_df['density'] = prox_df['MRE_counts']/(prox_df['length']/1000.0)
rand_df['density'] = rand_df['MRE_counts']/(rand_df['length']/1000.0)
anti_df['density'] = anti_df['MRE_counts']/(anti_df['length']/1000.0)
cor_lnc_counts = cor_df.loc[cor_lncs]
prox_lnc_counts = prox_df.loc[prox_lncs]
rand_lnc_counts = rand_df.loc[rand_lncs]
anti_lnc_counts = anti_df.loc[anti_lncs]
cor_lnc_counts['cat'] = "correlated"
prox_lnc_counts['cat'] = "proximal"
rand_lnc_counts['cat'] = "random"
anti_lnc_counts['cat'] = "anticorrelated"
cor_lnc_counts['group'] = "correlated/proximal"
prox_lnc_counts['group'] = "correlated/proximal"
rand_lnc_counts['group'] = "random"
anti_lnc_counts['group'] = "anticorrelated"
all_lnc_frame = cor_lnc_counts.append(prox_lnc_counts)
all_lnc_frame = all_lnc_frame.append(rand_lnc_counts)
all_lnc_frame = all_lnc_frame.append(anti_lnc_counts)
all_lnc_frame.index = [ix for ix, iy in enumerate(all_lnc_frame.index)]
# break if all counts are zero or < 10 objects
if max(all_lnc_frame['MRE_counts']) == 0 or len(all_lnc_frame) < 10:
P.touch(outfile)
return 0
else:
pass
pandas2ri.activate()
r_lnc_df = pandas2ri.py2ri_pandasdataframe(all_lnc_frame)
# formally test differences between gene sets with wilcoxon test
wilcoxpy = R['wilcox.test']
test_dict = {}
for combs in itertools.combinations(set(all_lnc_frame['group']), r=2):
vec1 = all_lnc_frame['density'][all_lnc_frame['group'] == combs[0]].values
r_vec1 = ro.FloatVector([f for f in vec1])
vec2 = all_lnc_frame['density'][all_lnc_frame['group'] == combs[1]].values
r_vec2 = ro.FloatVector([g for g in vec2])
res = wilcoxpy(r_vec1, r_vec2, alternative="greater")
pval = res.rx('p.value')[0][0]
stat = res.rx('statistic')[0][0]
test_dict[(combs[0], combs[1])] = {"W": stat,
"p-value": pval}
test_table = pd.DataFrame(test_dict).T
cond = cor_file.split("/")[-1].split("-")[0]
test_table.columns = ['W', 'p-value']
test_table['condition'] = cond
test_table.to_csv("stats.dir/%s-MRE_density-stats.tsv" % cond,
sep="\t", index_label="reference")
R.assign("r.df", r_lnc_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''r.df$group <- as.factor(r.df$group)''')
R('''p_des <- ggplot(r.df, aes(x=density, fill=group, colour=group)) + '''
'''geom_density(alpha=0.2) + labs(x="MREs per kb", y="density") + '''
'''theme(text=element_text(size=14, colour="black")) + theme_bw()''')
R('''png('%s', height=480, width=480)''' % outfile)
R('''print(p_des)''')
R('''dev.off()''')
@P.cluster_runnable
def plotMreCounts(counts_file, outfile):
'''
plot output from countMREsOverLncs as histograms
for both genes and lncRNAs
'''
mre_frame = pd.read_table(counts_file,
sep="\t",
header=None,
index_col=0)
mre_frame.columns = ['MRE_counts']
mre_frame.index.name = 'gene_id'
mre_frame['biotype'] = ['' for cx in mre_frame.index]
df_index = mre_frame.index.tolist()
lncs = [cl for cl in df_index if re.search("LNC", cl)]
genes = [cg for cg in df_index if re.search("ENS", cg)]
lnc_counts = mre_frame.loc[lncs]
gene_counts = mre_frame.loc[genes]
lnc_counts['biotype'] = "lncRNA"
gene_counts['biotype'] = "gene"
cor_mres = gene_counts.append(lnc_counts)
tot_val = len(cor_mres['MRE_counts'].values)
chained = itertools.chain(lnc_counts['MRE_counts'].values,
gene_counts['MRE_counts'].values)
max_val = max([s for s in chained])
# if all values are zero, touch a sentinel file and
# break out of function
if max_val == 0:
P.touch(outfile)
return 0
else:
pass
fig = plt.figure()
ax1 = fig.add_subplot(211)
lnc_vals = lnc_counts['MRE_counts'].values
binwidth = int(max_val/float(tot_val/5.0))
ax1.grid(True)
try:
ax1.hist(lnc_vals,
facecolor="blue",
label="lncRNA MRE counts",
bins=range(0, max_val + binwidth, binwidth))
except ValueError:
ax1.hist(lnc_vals,
facecolor="blue",
label="lncRNA MRE counts",
bins=range(0, max_val + binwidth))
ax1.legend()
ax2 = fig.add_subplot(212)
gene_vals = gene_counts['MRE_counts'].values
try:
ax2.hist(gene_vals,
facecolor="red",
label="gene MRE counts",
bins=range(0, max_val + binwidth, binwidth))
except ValueError:
ax2.hist(gene_vals,
facecolor="red",
label="gene MRE counts",
bins=range(0, max_val + binwidth))
ax2.grid(True)
ax2.legend()
fig.savefig(outfile)
def plotViolinCounts(infile, outfile):
'''
Generate ggplot violin plots of MRE count distributions
for genes and lncRNAs
'''
# use R code for now - need to work this out in matplotlib
mre_df = pd.read_table(infile, sep="\t", header=None, index_col=0)
mre_df.columns = ['MRE_counts']
mre_df.index.name = "gene_id"
mre_df['biotype'] = ['' for px in mre_df.index]
idx = mre_df.index.tolist()
lncs = [pl for pl in idx if re.search("LNC", pl)]
genes = [pg for pg in idx if re.search("ENS", pg)]
lnc_counts = mre_df.loc[lncs]
gene_counts = mre_df.loc[genes]
lnc_counts['biotype'] = "lncRNA"
gene_counts['biotype'] = "gene"
all_counts = gene_counts.append(lnc_counts)
all_idx = all_counts.index.tolist()
r_df = pandas2ri.py2ri_pandasdataframe(all_counts)
R.assign("r.df", r_df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p_g <- ggplot(r.df, aes(y=MRE_counts, x=as.factor(biotype),'''
'''colour=as.factor(biotype), fill=as.factor(biotype))) + '''
'''geom_violin() + labs(x="Biotype", y="MRE counts") + '''
'''guides(colour=F, fill=F)''')
R('''png("%(outfile)s", height=540, width=540)''' % locals())
R('''print(p_g)''')
R('''dev.off()''')
@P.cluster_runnable
def plotSharedCounts(cor_file,
prox_file,
random_file,
anti_file,
outfile):
''' density plots of shared miRNAs between gene:lncRNA pairs'''
if cor_file.endswith("gz"):
cor_comp = "gzip"
else:
cor_comp = None
if prox_file.endswith("gz"):
prox_comp = "gzip"
else:
prox_comp = None
if random_file.endswith("gz"):
rand_comp = "gzip"
else:
rand_comp = None
if anti_file.endswith("gz"):
anti_comp = "gzip"
else:
anti_comp = None
cor_df = pd.read_table(cor_file, sep="\t", index_col=None,
header=0, compression=cor_comp,
comment='#')
    # proportion shared = shared / (shared + unshared)
    cdenom = cor_df['total_shared'] + cor_df['unshared']
    cor_df['prop_shared'] = cor_df['total_shared']/cdenom
cor_df['ratio_shared'] = cor_df['total_shared']/cor_df['unshared']
prox_df = pd.read_table(prox_file, sep="\t", index_col=None,
header=0, compression=prox_comp,
comment='#')
    pdenom = prox_df['total_shared'] + prox_df['unshared']
    prox_df['prop_shared'] = prox_df['total_shared']/pdenom
prox_df['ratio_shared'] = prox_df['total_shared']/prox_df['unshared']
rand_df = pd.read_table(random_file, sep="\t", index_col=None,
header=0, compression=rand_comp,
comment='#')
    rdenom = rand_df['total_shared'] + rand_df['unshared']
    rand_df['prop_shared'] = rand_df['total_shared']/rdenom
rand_df['ratio_shared'] = rand_df['total_shared']/rand_df['unshared']
anti_df = pd.read_table(anti_file, sep="\t", index_col=None,
header=0, compression=anti_comp,
comment='#')
    adenom = anti_df['total_shared'] + anti_df['unshared']
    anti_df['prop_shared'] = anti_df['total_shared']/adenom
anti_df['ratio_shared'] = anti_df['total_shared']/anti_df['unshared']
cor_df['cat'] = "correlated"
prox_df['cat'] = "proximal"
rand_df['cat'] = "random"
anti_df['cat'] = "anticorrelated"
all_shared = cor_df.append(rand_df)
all_shared = all_shared.append(prox_df)
all_shared = all_shared.append(anti_df)
# need to re-index data frame after append to prevent duplicate indices
new = [x for x, y in enumerate(all_shared.index)]
all_shared.index = new
# formally test shared miRNAs between gene sets
wilcoxpy = R['wilcox.test']
test_dict = {}
for combs in itertools.combinations(set(all_shared['cat']), r=2):
vec1 = all_shared['total_shared'][all_shared['cat'] == combs[0]]
r_vec1 = ro.FloatVector([f for f in vec1.values])
vec2 = all_shared['total_shared'][all_shared['cat'] == combs[1]]
r_vec2 = ro.FloatVector([g for g in vec2.values])
res = wilcoxpy(r_vec1, r_vec2, alternative="greater")
pval = res.rx('p.value')[0][0]
stat = res.rx('statistic')[0][0]
test_dict[(combs[0], combs[1])] = {"W": stat,
"p-value": pval}
test_table = pd.DataFrame(test_dict).T
cond = cor_file.split("/")[-1].split("-")[0]
test_table['condition'] = cond
test_table.to_csv("stats.dir/%s-MRE_shared-stats.tsv" % cond,
sep="\t", index_label="reference")
r_share = pandas2ri.py2ri_pandasdataframe(all_shared)
R.assign("shared.df", r_share)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''shared.df$cat <- as.factor(shared.df$cat)''')
R('''p_share <- ggplot(shared.df, aes(x=total_shared, '''
'''fill=cat, colour=cat)) + geom_density(alpha=0.2) + '''
'''labs(x="Total number of shared miRNAs", y="density") + '''
'''theme(text=element_text(size=14, colour="black")) + theme_bw()''')
R('''png("%s", height=480, width=480)''' % outfile)
R('''print(p_share)''')
R('''dev.off()''')
def getMREs(mre_file, pairs_gtf):
'''
Get MREs for all lncRNAs and genes
'''
trans_gene_dict = {}
catalog = {}
# log all miRNAs for each gene and lncRNA
with IOTools.openFile(pairs_gtf, "rb") as gfile:
pairs_it = GTF.iterator(gfile)
for it in pairs_it:
trans_gene_dict[it.transcript_id] = it.gene_id
catalog[it.gene_id] = set()
with IOTools.openFile(mre_file, "rb") as mfile:
        # keep only MRE features, consistent with the other MRE iterators here
        mre_it = GTF.iterator_filtered(GTF.iterator(mfile), feature="MRE")
for mre in mre_it:
target = mre.asDict()['target']
mirna = mre.asDict()['miRNA']
gene = trans_gene_dict[target]
catalog[gene].add(mirna)
return catalog
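# Illustrative helper (not used by the pipeline): with the catalog returned
# by getMREs(), the miRNAs shared by a single lncRNA:gene pair are simply the
# set intersection, which is what shareMREs() below computes for every pair.
def _example_shared_mirnas(catalog, gene_id, lncRNA_id):
    return catalog[gene_id].intersection(catalog[lncRNA_id])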
@P.cluster_runnable
def shareMREs(mre_file, pairs_gtf, correlations, outfile):
'''
Find the shared MREs between highly correlated
lncRNA:gene pairs.
Output:
* list of shared miRNAs for each pair
* number and proportion of shared MREs between pairs
'''
catalog = getMREs(mre_file, pairs_gtf)
if correlations.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
cor_df = pd.read_table(correlations,
sep="\t",
compression=comp,
header=0)
shared_dict = {}
for idx in cor_df.index:
share = {}
gene = cor_df.loc[idx]['gene_id']
lnc = cor_df.loc[idx]['lncRNA_id']
gmirs = catalog[gene]
lmirs = catalog[lnc]
shared = gmirs.intersection(lmirs)
not_shared = gmirs.difference(lmirs)
try:
lnc_prop = len(shared)/float(len(lmirs))
except ZeroDivisionError:
lnc_prop = 0.0
try:
gene_prop = len(shared)/float(len(gmirs))
except ZeroDivisionError:
gene_prop = 0.0
share['gene_id'] = gene
share['lncRNA_id'] = lnc
share['unshared'] = len(not_shared)
share['total_shared'] = len(shared)
share['lncRNA_shared_proportion'] = lnc_prop
share['gene_shared_proportion'] = gene_prop
share['shared_miRNAs'] = ",".join([x for x in shared])
shared_dict[idx] = share
out_frame = pd.DataFrame(shared_dict).T
# get gene symbol ids for genes
mg = mygene.MyGeneInfo()
try:
q_symbol = mg.querymany(out_frame['gene_id'].tolist(),
scopes="ensemblgene",
species="mouse",
fields="symbol",
returnall=True)['out']
except AssertionError:
gene_set = [gsx for gsx in set(out_frame['gene_id'].tolist())]
q_symbol = mg.querymany(gene_set,
scopes="ensemblgene",
species="mouse",
fields="symbol",
returnall=True)['out']
q_df = pd.DataFrame(q_symbol)
try:
q_df.drop(['_id', 'notfound'], inplace=True, axis=1)
except ValueError:
pass
outdf = pd.merge(left=out_frame, right=q_df, how='inner',
left_on='gene_id', right_on='query')
outdf.drop(['query'], inplace=True, axis=1)
outdf.to_csv(outfile, sep="\t", index=None)
@P.cluster_runnable
def countSharedMREs(mre_file,
pairs_gtf,
shared_file,
outfile):
'''
Count the number of elements in each gene and lncRNA for
    which a targeting miRNA is shared.
'''
if shared_file.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
shared_df = pd.read_table(shared_file,
sep="\t",
header=0,
index_col=None,
compression=comp)
# 'shared_miRNAs' are a single string of comma-separated ids
# need to split these to store for later in an array/list
shared_df = shared_df.fillna("")
shared_mirs = [h.split(",") for h in shared_df['shared_miRNAs'].values]
shared_df['shared_miRNAs'] = shared_mirs
# make a dictionary mapping gene_ids onto transcript ids - mre.gtf
# only contains transcript ids
# catalog will be all of the miRNA ids mapping to a gene/lncRNA
trans_gene_dict = {}
catalog = {}
mre_dict = {}
gene_ids = shared_df['gene_id'].values
lnc_ids = shared_df['lncRNA_id'].values
with IOTools.openFile(pairs_gtf, "rb") as gfile:
pairs_it = GTF.iterator(gfile)
for it in pairs_it:
trans_gene_dict[it.transcript_id] = it.gene_id
catalog[it.gene_id] = set()
with IOTools.openFile(mre_file, "rb") as mfile:
mre_it = GTF.iterator_filtered(GTF.iterator(mfile), "MRE")
for mre in mre_it:
gene = trans_gene_dict[mre.asDict()['target']]
mre_entry = {'miRNA': mre.asDict()['miRNA'],
'target': gene,
'seed_class': mre.asDict()['seed_class'],
'contig': mre.contig,
'start': mre.start,
'end': mre.end,
'strand': mre.strand}
mre_dict[mre.gene_id] = mre_entry
mre_df = pd.DataFrame(mre_dict).T
# count the number of MREs for each miRNA in each gene/lncRNA
target_dict = {}
targets = set(mre_df['target'].values)
for tar in targets:
mir_dict = {}
tar_df = mre_df[mre_df['target'] == tar]
mirs = set(tar_df['miRNA'].values)
for mi in mirs:
mir_dict[mi] = len(tar_df[tar_df['miRNA'] == mi])
target_dict[tar] = mir_dict
# count sites for shared miRNAs
shared_gene_counts = {}
for gene in gene_ids:
mres = {}
shared_genes = shared_df[shared_df["gene_id"] == gene]
shared_mirs = shared_genes['shared_miRNAs'].values[0]
for mi in shared_mirs:
if len(mi):
count = target_dict[gene][mi]
mres[mi] = count
else:
pass
shared_gene_counts[gene] = mres
shared_lnc_counts = {}
for lnc in lnc_ids:
mres = {}
shared_lncs = shared_df[shared_df['lncRNA_id'] == lnc]
shared_mirs = shared_lncs['shared_miRNAs'].values
for mi in shared_mirs[0]:
if len(mi):
count = target_dict[lnc][mi]
mres[mi] = count
else:
pass
shared_lnc_counts[lnc] = mres
# generate the final table of genes, lncs and shared miRNAs with counts
shared_mres_dict = {}
for idx in shared_df.index:
gene = shared_df.iloc[idx]['gene_id']
lnc = shared_df.iloc[idx]['lncRNA_id']
for mir in shared_df.iloc[idx]['shared_miRNAs']:
try:
gene_mirs = shared_gene_counts[gene][mir]
except KeyError:
gene_mirs = 0
try:
lnc_mirs = shared_lnc_counts[lnc][mir]
except KeyError:
lnc_mirs = 0
mir_dict = {'miRNA': mir,
'gene_counts': gene_mirs,
'lncRNA_counts': lnc_mirs}
shared_mres_dict[(gene, lnc)] = pd.Series(mir_dict)
shared_mres_df = pd.DataFrame(shared_mres_dict).T
shared_mres_df.to_csv(outfile, sep="\t")
@P.cluster_runnable
def correlateRandomPairs(pairs_file, expression, ref_gtf, outfile, seed):
'''
Cross-correlate random pairs of lncRNAs and protein-coding
genes
'''
if pairs_file.split(".")[-1] == "gz":
pair_comp = "gzip"
else:
pair_comp = None
if expression.split(".")[-1] == "gz":
expr_comp = "gzip"
else:
expr_comp = None
expr_df = pd.read_table(expression,
compression=expr_comp,
sep="\t",
header=0,
index_col=0)
pairs_df = pd.read_table(pairs_file,
compression=pair_comp,
sep="\t",
header=0,
index_col=0)
lnc_ids = set(pairs_df['lncRNA_id'])
gene_ids = pairs_df.index
all_lncs = [l for l in expr_df.index if re.search("LNC", l)]
l_expr = expr_df.loc[all_lncs]
l_expr.index.name = "lncRNA_id"
all_genes = [g for g in expr_df.index if re.search("ENS", g)]
g_expr = expr_df.loc[all_genes]
# get lncRNA classifications from reference gtf
# get transcript lengths for matching
ofile = IOTools.openFile(ref_gtf, "rb")
gene_it = GTF.transcript_iterator(GTF.iterator(ofile))
class_dict = {}
length_dict = {}
for gene in gene_it:
for trans in gene:
class_dict[trans.gene_id] = {'class': trans.source,
'exon': trans.asDict()['exon_status']}
try:
length_dict[trans.gene_id] += (trans.end - trans.start)
except KeyError:
length_dict[trans.gene_id] = (trans.end - trans.start)
ofile.close()
# expression from pairs file
pairs_lexpr = l_expr.loc[lnc_ids]
# randomly sub sample genes and lncRNAs
    # match lncRNA expression to within +/- 1.0 of the mean expression
    # of the correlated lncRNAs
# and length within 1kb
random.seed(seed)
lnc_idxs = set()
l_count = 0
# there may not be sufficient random matched lncRNAs
# to have a 1:1 match with the proximal/correlated lncRNAs
max_hits = 0
while max_hits < 100:
# randomly select a lncRNA from all expressed lncRNAs
r_lnc = random.randint(0, len(all_lncs) - 1)
r_lnc_name = expr_df.iloc[r_lnc].name
# randomly select a matched lncRNA from the pairs file
r_match = random.randint(0, len(lnc_ids) - 1)
# check these are not the same lncRNA
if r_lnc_name != pairs_lexpr.iloc[r_match].name:
rclass = class_dict[r_lnc_name]['class']
rexon = class_dict[r_lnc_name]['exon']
lnc_len = length_dict[r_lnc_name]
# select multi-exonic intergenic lncRNAs only
if rclass == "intergenic" and rexon == "m":
hi_xpr = np.mean(pairs_lexpr.iloc[r_match]) + 1.0
lo_xpr = np.mean(pairs_lexpr.iloc[r_match]) - 1.0
hi_len = lnc_len + 1000
lo_len = lnc_len - 1000
if hi_xpr < np.mean(expr_df.iloc[r_lnc]):
pass
elif lo_xpr > np.mean(expr_df.iloc[r_lnc]):
pass
else:
if lnc_len > hi_len:
pass
elif lnc_len < lo_len:
pass
else:
# only add random lnc if matched on expression
# lncRNA transcript length
# and lncRNA classification, but not ID
E.info("Adding lncRNA {} to pool".format(r_lnc))
set_len = len(lnc_idxs)
lnc_idxs.add(r_lnc)
if set_len == len(lnc_idxs):
E.info("lncRNA already in set")
max_hits += 1
else:
E.info("{}/{} lncRNAs selected".format(len(lnc_idxs),
len(lnc_ids)))
else:
pass
else:
pass
E.info("matched {} lncRNAs".format(len(lnc_idxs)))
gene_idxs = set()
while len(gene_idxs) != len(lnc_idxs):
gene_idxs.add(random.randint(0, len(all_genes) - 1))
# correlate random genes and lncRNAs
rand_lncs = l_expr.iloc[[i for i in lnc_idxs]]
rand_genes = g_expr.iloc[[q for q in gene_idxs]]
r_lncs = rand_lncs.index
r_genes = rand_genes.index
rand_cor_df = pd.DataFrame(index=r_lncs,
columns=['gene_id', 'correlation'])
for each in itertools.izip(r_lncs, r_genes):
lval = rand_lncs.loc[each[0]].tolist()
gval = rand_genes.loc[each[1]].tolist()
rcor = TS.crossCorrelate(lval, gval, lag=0)
rand_cor_df.loc[each[0]]['gene_id'] = each[1]
rand_cor_df.loc[each[0]]['correlation'] = rcor[0]
rand_cor_df['lncRNA_id'] = rand_cor_df.index
rand_cor_df.index = rand_cor_df['gene_id']
rand_cor_df.drop(['gene_id'], inplace=True, axis=1)
rand_cor_df.to_csv(outfile, sep="\t", index_label="gene_id")
@P.cluster_runnable
def antiCorrelatePairs(pairs_file, expression, outfile, threshold):
'''
Get lncRNAs with paired protein-coding genes that
are anti-correlated in expression
'''
# need to restrict the number of transcripts/lncRNAs correlated.
if pairs_file.split(".")[-1] == "gz":
cor_comp = "gzip"
else:
cor_comp = None
if expression.split(".")[-1] == "gz":
expr_comp = "gzip"
else:
expr_comp = None
pair_df = pd.read_table(pairs_file,
compression=cor_comp,
sep="\t",
header=0,
index_col=0)
expr_df = pd.read_table(expression,
compression=expr_comp,
sep="\t",
header=0,
index_col=None)
expr_df.columns = ['lncRNA_id', 'gene_id', 'value']
# select lncRNAs that are highly correlated
# select genes that are anti-correlated with these lncRNAs
lncs = set([l for l in pair_df['lncRNA_id'] if re.search("LNC", l)])
anticor_dict = {}
for lnc in lncs:
gene_cors = expr_df.loc[expr_df['lncRNA_id'] == lnc]
min_cor = np.min(gene_cors['value'])
gene_pair = gene_cors.loc[gene_cors['value'] == min_cor]
anticor_dict[lnc] = {'gene_id': gene_pair['gene_id'].values[0],
'value': gene_pair['value'].values[0]}
anticor_df = pd.DataFrame(anticor_dict).T
anticor_df.to_csv(outfile, sep="\t", index_label="lncRNA_id")
@P.cluster_runnable
def correlateProximalPairs(distances, pairs_file, expression, outfile):
'''
Get lncRNAs with most proximal protein-coding gene,
calculate cross-correlation of expression.
'''
if pairs_file.split(".")[-1] == "gz":
cor_comp = "gzip"
else:
cor_comp = None
if distances.split(".")[-1] == "gz":
dist_comp = "gzip"
else:
dist_comp = None
if expression.split(".")[-1] == "gz":
expr_comp = "gzip"
else:
expr_comp = None
pair_df = pd.read_table(pairs_file,
compression=cor_comp,
sep="\t",
header=0,
index_col=0)
dist_df = pd.read_table(distances,
compression=dist_comp,
sep="\t",
header=0,
index_col=0)
expr_df = pd.read_table(expression,
compression=expr_comp,
sep="\t",
header=0,
index_col=0)
lnc_dists = dist_df.loc[set(pair_df['lncRNA_id'].values)]
lnc_expr = expr_df.loc[set(lnc_dists.index)]
gene_expr = expr_df.loc[lnc_dists['closest_id']]
gene_expr['gene_id'] = gene_expr.index
gene_expr.drop_duplicates(subset='gene_id',
take_last=True,
inplace=True)
gene_expr.drop(['gene_id'], inplace=True, axis=1)
lncs = lnc_expr.index
genes = gene_expr.index
# get correlations between all lncs and genes,
# regardless of proximity - subset later
cor_frame = pd.DataFrame(index=lncs, columns=genes)
cor_frame = cor_frame.fillna(0.0)
pairs = itertools.product(lncs, genes)
for each in pairs:
lnc_val = lnc_expr.loc[each[0]].tolist()
gene_val = gene_expr.loc[each[1]].tolist()
cor = TS.crossCorrelate(lnc_val, gene_val, lag=0)
cor_frame.loc[each[0]][each[1]] = cor
cor_frame = cor_frame.fillna(0.0)
cor_frame.index.name = "lncRNA_id"
unstack = cor_frame.unstack()
cor_list = unstack.reset_index()
cor_list.columns = ['gene_id', 'lncRNA_id', 'correlation']
cor_list.index = cor_list['lncRNA_id']
cor_list.drop(['lncRNA_id'], inplace=True, axis=1)
prox_cors = {}
for idx in cor_list.index:
cors = cor_list.loc[idx]
cors.index = cors['gene_id']
prox_gene = lnc_dists.loc[idx]['closest_id']
prox_cors[idx] = {'gene_id': prox_gene,
'correlation': cors.loc[prox_gene]['correlation']}
prox_cor_df = pd.DataFrame(prox_cors).T
prox_cor_df['lncRNA_id'] = prox_cor_df.index
prox_cor_df.index = prox_cor_df['gene_id']
prox_cor_df.drop(['gene_id'], inplace=True, axis=1)
prox_cor_df.to_csv(outfile, sep="\t", index_label="gene_id")
def tempCorr(x, y):
'''
    Temporal correlation of two time series, computed on their first
    differences xi = x[i+1] - x[i] and yi = y[i+1] - y[i]:
    Corr(x, y) = sum(xi * yi)/(sqrt(sum(xi^2)) * sqrt(sum(yi^2)))
    Returns 0 if either series shows no change between time points.
'''
sum_prod = []
sum_xsq = []
sum_ysq = []
for i in range(len(x) - 1):
xi = float(x[i+1]) - float(x[i])
yi = float(y[i+1]) - float(y[i])
prod = xi * yi
sum_prod.append(prod)
sq_x = xi**2
sq_y = yi**2
sum_xsq.append(sq_x)
sum_ysq.append(sq_y)
nume = sum(sum_prod)
denom = float(math.sqrt(sum(sum_xsq)) * math.sqrt(sum(sum_ysq)))
if denom != 0:
return nume/denom
else:
return 0
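# The following helper is an illustrative sketch only (not called by the
# pipeline): it demonstrates, on hypothetical toy series, that tempCorr
# scores co-ordinated changes across time points rather than absolute
# levels, because it works on first differences.
def _example_tempCorr():
    '''Illustrative usage of tempCorr on toy series.'''
    rising = [1, 2, 3, 4]
    doubled = [2, 4, 6, 8]
    falling = [8, 6, 4, 2]
    # both series increase at every step -> correlation close to +1
    assert abs(tempCorr(rising, doubled) - 1.0) < 1e-6
    # opposite direction at every step -> correlation close to -1
    assert abs(tempCorr(rising, falling) + 1.0) < 1e-6
    # a flat series has zero variance in its differences -> returns 0
    assert tempCorr(rising, [5, 5, 5, 5]) == 0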
def crossCorrelate(t, s, lag=0):
'''
Calculate the cross-correlation of two timeseries, s and t.
Return the normalized correlation value at lag=n.
Uses numpy.correlate; default is to return lag=0.
TODO: return multiple lags?
'''
t_mean = np.mean(t)
s_mean = np.mean(s)
t_std = np.std(t)
s_std = np.std(s)
len_t = len(t)
t_norm = [((x - t_mean)/(t_std * len_t)) for x in t]
s_norm = [((y - s_mean)/s_std) for y in s]
if lag == 0:
xcorr = np.correlate(t_norm, s_norm)
elif lag != 0:
xcorr = np.correlate(t_norm, s_norm, mode=2)[len_t - 1 + lag]
return xcorr
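# Illustrative sketch only (not called by the pipeline): at lag=0 the
# normalisation in crossCorrelate reduces to the Pearson correlation
# coefficient, so it should agree with np.corrcoef on the same toy data.
def _example_crossCorrelate():
    '''Check crossCorrelate at lag=0 against np.corrcoef on toy data.'''
    t = [1.0, 3.0, 2.0, 5.0, 4.0]
    s = [2.0, 4.0, 1.0, 6.0, 5.0]
    xcor = float(crossCorrelate(t, s, lag=0))
    pearson = np.corrcoef(t, s)[0][1]
    assert abs(xcor - pearson) < 1e-6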
def correlateEigengenes(ref_eigens,
lnc_eigens,
correlation,
lag=0):
'''
Correlate two sets of eigenenes.
Specify correlation types. Current options are temporal
and cross-correlation.
'''
if ref_eigens.split(".")[-1] == "gz":
ref_comp = "gzip"
else:
ref_comp = None
if lnc_eigens.split(".")[-1] == "gz":
lnc_comp = "gzip"
else:
lnc_comp = None
ref_df = pd.read_table(ref_eigens, sep="\t",
header=0, index_col=0,
compression=ref_comp)
lnc_df = pd.read_table(lnc_eigens, sep="\t",
header=0, index_col=0,
compression=lnc_comp)
corr_frame = correlateLncRNAs(lnc_df,
ref_df,
correlation,
lag)
return corr_frame
def correlateGenesLncs(gene_list, express, correlation,
lncs_list=None):
'''
Cross correlate all lncRNAs and a set of
specific protein-coding genes. A subset of lncRNAs
can be selected by providing a list lncRNA IDs
'''
exprs_df = pd.read_table(express,
sep="\t",
index_col=None,
header=0)
exprs_df.set_index('gene', inplace=True, drop=True)
gene_df = exprs_df.loc[gene_list]
if not lncs_list:
lncs_list = [lx for lx in exprs_df.index if re.search("LNC", lx)]
else:
pass
lnc_df = exprs_df.loc[lncs_list]
cor_frame = pd.DataFrame(index=lncs_list, columns=gene_list)
cor_frame = cor_frame.fillna(0.0)
lag = 0
if correlation == "cross-correlation":
for x in itertools.product(lncs_list, gene_list):
lnc_vals = lnc_df.loc[x[0]].tolist()
gene_vals = gene_df.loc[x[1]].tolist()
corr = crossCorrelate(lnc_vals, gene_vals, lag)
cor_frame[x[1]][x[0]] = corr
else:
pass
return cor_frame
def correlateLncRNAs(lnc_frame, gene_frame, correlation, lag=0):
'''
Use temporal correlation to correlate lncRNA time series
expression profiles with input expression profiles.
'''
lnc_id = lnc_frame.index
gene_id = gene_frame.index
cor_frame = pd.DataFrame(index=lnc_id, columns=gene_id)
cor_frame = cor_frame.fillna(0.0)
if correlation == "temporal":
for x in itertools.product(lnc_id, gene_id):
lnc_vals = lnc_frame.loc[x[0]].tolist()
gene_vals = gene_frame.loc[x[1]].tolist()
corr = tempCorr(lnc_vals, gene_vals)
cor_frame[x[1]][x[0]] = corr
elif correlation == "cross-correlation":
for x in itertools.product(lnc_id, gene_id):
lnc_vals = lnc_frame.loc[x[0]].tolist()
gene_vals = gene_frame.loc[x[1]].tolist()
corr = crossCorrelate(lnc_vals, gene_vals, lag)
cor_frame[x[1]][x[0]] = corr
return cor_frame
def filterCorrelations(infile, threshold=None):
'''
output list of gene1:gene2:value
'''
if infile.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
cor_frame = pd.read_table(infile, sep="\t", header=0,
index_col=0, compression=comp)
cor_list = cor_frame.unstack()
cor_list = cor_list.reset_index()
cor_list.columns = ['lncRNA_id', 'gene_id', 'value']
cor_list.index = cor_list['gene_id']
cor_list.drop(['gene_id'], inplace=True, axis=1)
if threshold:
keep_cor = cor_list['value'] >= threshold
cor_list = cor_list.loc[keep_cor]
else:
pass
return cor_list
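# Illustrative sketch only (not called by the pipeline): filterCorrelations
# names the unstacked levels as if the input matrix has genes on the rows
# and lncRNAs on the columns; this toy example follows that layout and the
# threshold value here is purely hypothetical.
def _example_filterCorrelations_layout():
    '''Toy example of the unstack/reset_index flattening used above.'''
    mat = pd.DataFrame({"LNC1": [0.9, 0.2], "LNC2": [0.1, 0.8]},
                       index=["geneA", "geneB"])
    flat = mat.unstack().reset_index()
    flat.columns = ['lncRNA_id', 'gene_id', 'value']
    # with an illustrative threshold of 0.5 only two pairs survive
    assert len(flat[flat['value'] >= 0.5]) == 2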
def correlateLncRNAsWithClusterGenes(cor_file,
expr_file,
set1_clusters,
set2_clusters,
correlation,
lag=0):
'''
Correlate lncRNAs with genes within clusters for which
the cluster eigengene is correlated with the lncRNA
cor_file = correlation of set1 and set2 eigengenes
expr_file = expression data of genes and lncRNAs in all clusters
set1_clusters = cluster labels for expr_file gene_ids
set2_clusters = cluster labels for lnc_expr lncRNA_ids
correlation = correlation measure; cross-correlation or temporal
threshold = correlation threshold (positive only)
lag = lag to report, for cross-correlation only
'''
# handle gzip compressed input files
if cor_file.split(".")[-1] == "gz":
cor_comp = "gzip"
else:
cor_comp = None
if expr_file.split(".")[-1] == "gz":
expr_comp = "gzip"
else:
expr_comp = None
if set1_clusters.split(".")[-1] == "gz":
set1_comp = "gzip"
else:
set1_comp = None
if set2_clusters.split(".")[-1] == "gz":
set2_comp = "gzip"
else:
set2_comp = None
cor_df = pd.read_table(cor_file,
sep="\t",
index_col=None,
header=0,
compression=cor_comp)
cor_df.columns = ['set1', 'set2', 'value']
expr_df = pd.read_table(expr_file,
sep="\t",
index_col=0,
header=0,
compression=expr_comp)
# cluster ids for expr_file
set1_df = pd.read_table(set1_clusters,
sep="\t",
index_col=0,
header=0,
compression=set1_comp)
# cluster ids for lnc_expr
set2_df = pd.read_table(set2_clusters,
sep="\t",
index_col=0,
header=0,
compression=set2_comp)
set1_df.columns = ['gene_id', 'cluster']
set2_df.columns = ['gene_id', 'cluster']
set1_df.index = set1_df['gene_id']
set2_df.index = set2_df['gene_id']
set1_df.drop(['gene_id'], inplace=True, axis=1)
set2_df.drop(['gene_id'], inplace=True, axis=1)
corr_dict = {}
for x in cor_df.index:
set1_ids, set2_ids = cor_df.loc[x]['set1'], cor_df.loc[x]['set2']
gene_ids = set1_df[set1_df['cluster'] == set1_ids]
lnc_ids = set2_df[set2_df['cluster'] == set2_ids]
lnc_vals = expr_df.loc[lnc_ids.index.tolist()]
gene_vals = expr_df.loc[gene_ids.index.tolist()]
# select lncRNAs and genes in correlated cluster eigengenes
# output gene:lncRNA:correlation:gene_cluster:lncRNA_cluster
E.info("correlations for genes in cluster %s "
"and lncRNAs in cluster %s" % (set1_ids, set2_ids))
cluster_cor = correlateLncRNAs(lnc_vals,
gene_vals,
correlation,
lag)
cluster_cor['lncRNA_id'] = cluster_cor.index
cor_list = pd.melt(cluster_cor, id_vars='lncRNA_id')
cor_list['gene_cluster'] = set1_ids
cor_list['lncRNA_cluster'] = set2_ids
cor_list.index = cor_list['gene_id']
cor_list.drop(['gene_id'], inplace=True, axis=1)
corr_dict[(set1_ids, set2_ids)] = cor_list
clusters = corr_dict.keys()
cluster1 = clusters[0]
clusters.remove(cluster1)
results_frame = corr_dict[cluster1]
for clust in clusters:
results_frame = results_frame.append(corr_dict[clust])
return results_frame
@P.cluster_runnable
def compareFovsGC(infile, fo_gc, image_dir):
'''
Compare results from time point differential expression analysis
to Fo -> GC differential analysis results.
'''
name_list = infile.split("/")[-1].split("_")
p_name = name_list[0] + "_" + name_list[2]
    # strip the trailing "-time.tsv" (str.rstrip removes a character set,
    # not a suffix)
    p_name = re.sub(r"-time\.tsv$", "", p_name)
df = pd.read_table(infile,
sep="\t",
header=0,
index_col=0)
# select differentially expressed genes with p <= 0.01
# intersect gene_ids and subset these for plotting
df = df[df['padj'] <= 0.01]
fo_gc = fo_gc[fo_gc['padj'] <= 0.01]
agree = []
overlap = set(df.index).intersection(set(fo_gc.index))
for x in overlap:
val1 = df.loc[x]['log2FoldChange']
val2 = fo_gc.loc[x]['log2FoldChange']
if (val1 > 0) and (val2 > 0):
agree.append(x)
elif (val1 < 0) and (val2 < 0):
agree.append(x)
else:
pass
# merge dfs on gene_id, keep log2 fold changes only
merged = pd.merge(left=df.loc[agree],
right=fo_gc.loc[agree],
how='inner',
left_index=True,
right_index=True)
merged = merged[['log2FoldChange_x', 'log2FoldChange_y']]
columns = ['%s_l2fc' % p_name, 'fo_gc_l2fc']
merged.columns = columns
merged = merged.fillna(0.0)
ggPlotRScatter(merged, p_name, image_dir)
def ggPlotRScatter(df, p_name, image_dir):
'''
Generate scatter plots of Fo->GC vs time points
for intersecting differentially expressed genes.
Colour by |difference in log2 fold change|.
'''
df['diff'] = abs(df['%s_l2fc' % p_name] - df['fo_gc_l2fc'])
df = df.fillna(0.0)
# set up ggplot components in R
pandas2ri.activate()
R.assign('df', df)
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''p_base <- ggplot(df, aes(x=df[,1], '''
'''y=fo_gc_l2fc, colour=diff))''')
R('''geom <- geom_point(alpha=0.5)''')
R('''coloring <- scale_color_gradient(low='blue', high='red')''')
R('''labels <- labs(x="%(p_name)s log2 fold change", '''
'''y="Fo->GC log2 fold change",'''
'''title="log2 fold change correlation between\n'''
''' %(p_name)s and Fo->GC differential expression '''
'''analyses")''' % locals())
R('''xlimits <- xlim(-15, 15)''')
R('''ylimits <- ylim(-15, 15)''')
# put it all together
R('''p_tot = p_base + geom + coloring + labels + xlimits + ylimits''')
    # save to image directory
# need to switch on x11 plotting device for ggsave to work
# cannot use ggsave on cluster, revert to png(), do not turn on x11
# R.x11()
R('''png("%(image_dir)s/%(p_name)s-vsFo_GC.png")''' % locals())
R('''print(p_tot)''')
R('''dev.off()''')
@P.cluster_runnable
def unionOverlapFoVsGC(infile, fo_gc, outfile):
'''
Take the union of all temporally differentially
expressed genes and intersect if with the DE
genes between Fo - > GC
'''
name_list = infile.split("/")[-1].split("-")
out_name = name_list[0] + "-Fo_GC_Union-intersect.tsv"
df = pd.read_table(infile,
sep="\t",
header=0,
index_col=0)
de_genes = set(df.index)
fo_gc_de = fo_gc[fo_gc['padj'] <= 0.01].index
fo_gc_genes = [gx for gx in fo_gc_de if re.search("ENS", gx)]
intersect_genes = de_genes.intersection(fo_gc_genes)
with open(outfile, "w") as ofile:
for gene in intersect_genes:
ofile.write("{}\n".format(gene))
@P.cluster_runnable
def getCoreUnionGenes(infiles, outfile):
'''
Intersect all condition temporally DE genes that overlap
with Fo -> GC genes into a set of core DE genes
'''
gene_sets = []
for fle in infiles:
with open(fle, "r") as ifile:
genes = [gx.rstrip("\n") for gx in ifile.readlines()]
gene_sets.append(genes)
set1 = set(gene_sets[0])
for i in range(1, len(gene_sets)):
set2 = set(gene_sets[i])
set3 = set1.intersection(set2)
set1 = set3
with open(outfile, "w") as ofile:
for gene in set1:
ofile.write("{}\n".format(gene))
@P.cluster_runnable
def coreOverlapFoVsGC(infile, fo_gc):
'''
Take a list of differentially expressed genes
across each condition and time point, intersect with
FovsGC
'''
name_list = infile.split("/")[-1].split("_")
out_name = name_list[0] + "_" + name_list[2]
    # strip the trailing "-time.tsv" suffix explicitly
    out_name = re.sub(r"-time\.tsv$", "", out_name)
out_name = out_name + "_Fo-GC-intersect.tsv"
df = pd.read_table(infile,
sep="\t",
header=0,
index_col=0)
# select differentially expressed genes with p <= 0.01
# intersect gene_ids and subset these for plotting
genes = df[df['padj'] <= 0.01].index.tolist()
fo_gc_genes = fo_gc[fo_gc['padj'] <= 0.01].index.tolist()
agree = []
disagree = []
overlap = set(genes).intersection(set(fo_gc_genes))
for x in overlap:
val1 = df.loc[x]['log2FoldChange']
val2 = fo_gc.loc[x]['log2FoldChange']
if (val1 > 0) and (val2 > 0):
agree.append(x)
elif (val1 < 0) and (val2 < 0):
agree.append(x)
else:
disagree.append(x)
# output list to file
with IOTools.openFile("FovsGC_compare.dir/%s" % out_name, "w") as outfile:
for gene in agree:
outfile.write("%s\n" % gene)
@P.cluster_runnable
def plotTimepointIntersection(infiles, outfile):
'''
Plot Venn diagram of intersection of gene lists
'''
inter_dict = {}
for fle in infiles:
header = fle.split("/")[-1].split("-")[0]
in_df = pd.read_table(fle, sep="\t", header=0, index_col=0)
genes = in_df.index.tolist()
inter_dict[header] = genes
biotype = fle.split("/")[-1].split("-")[-1].split("_")[-1]
biotype = biotype.split(".")[0]
out_dir = "/".join(outfile.split("/")[:-1])
TS.drawVennDiagram(inter_dict, biotype, out_dir)
@P.cluster_runnable
def getTimepointIntersections(infiles, n_times, outfile):
'''
Take first n timepoints and intersect for each in vitro
activation condition.
'''
# get gene lists from files
file_dictionary = {}
for infile in infiles:
gene_list = []
with IOTools.openFile(infile, "rb") as gene_file:
gene_list = gene_file.read().split("\n")
gene_list.remove('')
file_dictionary[infile] = gene_list
# get intersections across time points
time_point_dict = {}
for tme in n_times:
tpoints = [t for t in file_dictionary.keys() if re.search(str(tme), t)]
time_set = set(file_dictionary[tpoints[0]])
for i in range(1, len(tpoints)):
gene_list = file_dictionary[tpoints[i]]
time_set = time_set.intersection(gene_list)
time_point_dict[str(tme)] = time_set
# intersect all time points
core_set = set(time_point_dict[str(n_times[0])])
for j in range(1, len(time_point_dict.keys())):
core_set = core_set.intersection(time_point_dict[str(n_times[j])])
core_list = list(core_set)
core_genes = [ge for ge in list(core_list) if re.search("EN", ge)]
core_lncs = [lc for lc in list(core_list) if re.search("LNC", lc)]
mg = mygene.MyGeneInfo()
out_core = mg.querymany(core_genes,
scopes="ensemblgene",
fields="symbol",
returnall=True)['out']
out_df = pd.DataFrame(out_core)
out_df.drop(['notfound'], inplace=True, axis=1)
out_df.index = out_df['query']
out_df.drop_duplicates(subset='query', take_last=True, inplace=True)
out_df.drop(['query'], inplace=True, axis=1)
out_df.to_csv(outfile,
sep="\t",
index_label="gene_id")
condition = outfile.split("-")[0]
lnc_out = "%s-timepoint_intersection_lncRNAs.tsv" % condition
with IOTools.openFile(lnc_out, "w") as lnc_file:
lnc_file.write("lncRNA_id")
for lncrna in core_lncs:
lnc_file.write("%s\n" % lncrna)
@P.cluster_runnable
def getCoreGenes(infiles, n_times, outfile):
'''
Get files of gene lists that intersect with Fo-GC gene list
and intersect across conditions for each time point, then
across first n_times.
'''
# get gene lists from files
file_dictionary = {}
for infile in infiles:
gene_list = []
with IOTools.openFile(infile, "rb") as gene_file:
gene_list = gene_file.read().split("\n")
gene_list.remove('')
file_dictionary[infile] = gene_list
# get intersections across conditions at each time point
time_point_dict = {}
for tme in n_times:
tpoints = [t for t in file_dictionary.keys() if re.search(str(tme), t)]
time_set = set(file_dictionary[tpoints[0]])
for i in range(1, len(tpoints)):
gene_list = file_dictionary[tpoints[i]]
time_set = time_set.intersection(gene_list)
time_point_dict[str(tme)] = time_set
# intersect all time points
core_set = set(time_point_dict[str(n_times[0])])
for j in range(1, len(time_point_dict.keys())):
core_set = core_set.intersection(time_point_dict[str(n_times[j])])
core_list = list(core_set)
core_genes = [ge for ge in list(core_list) if re.search("EN", ge)]
core_lncs = [lc for lc in list(core_list) if re.search("LNC", lc)]
mg = mygene.MyGeneInfo()
out_core = mg.querymany(core_genes,
scopes="ensemblgene",
fields="symbol",
returnall=True)['out']
out_df = pd.DataFrame(out_core)
out_df.drop(['notfound'], inplace=True, axis=1)
out_df.index = out_df['query']
out_df.drop_duplicates(subset='query', take_last=True, inplace=True)
out_df.drop(['query'], inplace=True, axis=1)
out_df.to_csv(outfile,
sep="\t",
index_label="gene_id")
lnc_out = "%s-%s-core_lncRNAs.tsv" % (outfile.split("-")[0],
outfile.split("-")[1])
with IOTools.openFile(lnc_out, "w") as lnc_file:
for lncrna in core_lncs:
lnc_file.write("%s\n" % lncrna)
@P.cluster_runnable
def getConditionGenes(list_of_files, reference, outfile):
'''
Get time point intersection of genes and lncRNAs
specific to each condition for n time points.
'''
inter_dict = {}
# parse gene set per condition from files
for fle in list_of_files:
header = fle.split("/")[-1].split("-")[0]
in_df = pd.read_table(fle, sep="\t", header=0, index_col=0)
genes = in_df.index.tolist()
inter_dict[header] = set(genes)
# use difference_update iteratively to get difference between
# reference and all other conditions
spec_set = set(inter_dict[reference])
for pair in itertools.product(inter_dict.keys(), inter_dict.keys()):
# only get condition genes for specific condition
if pair[0] == reference:
if pair[0] == pair[1]:
pass
else:
spec_set.difference_update(inter_dict[pair[1]])
else:
pass
# detect genes - if so output gene symbols, otherwise skip and
# write outfile
if len([g for g in spec_set if re.search("ENS", g)]):
geneset = [q for q in spec_set]
mg = mygene.MyGeneInfo()
out_core = mg.querymany(geneset,
scopes="ensemblgene",
fields="symbol",
returnall=True)['out']
out_df = pd.DataFrame(out_core)
try:
out_df.drop(['notfound'], inplace=True, axis=1)
except ValueError:
pass
out_df.index = out_df['query']
out_df.drop_duplicates(subset='query', take_last=True, inplace=True)
out_df.drop(['query'], inplace=True, axis=1)
out_df.to_csv(outfile,
sep="\t",
index_label="gene_id")
else:
with IOTools.openFile(outfile, "w") as ofile:
for obj in spec_set:
ofile.write("%s\n" % obj)
@P.cluster_runnable
def correlateGeneLncRNA(gene_file, lnc_file, expression_file, outfile):
'''
Correlate gene and lncRNA expression for a given condition
using temporal correlation.
'''
if gene_file.endswith("gz"):
gene_comp = "gzip"
else:
gene_comp = None
if lnc_file.endswith("gz"):
lnc_comp = "gzip"
else:
lnc_comp = None
if expression_file.endswith("gz"):
expr_comp = "gzip"
else:
expr_comp = None
gene_list = pd.read_table(gene_file, sep="\t", index_col=0, header=0,
compression=gene_comp)
lnc_list = pd.read_table(lnc_file, sep="\t", index_col=0, header=None,
compression=lnc_comp)
expr_df = pd.read_table(expression_file, sep="\t", header=0,
index_col=0, compression=expr_comp)
gene_ids = gene_list.index.tolist()
lnc_ids = lnc_list.index.tolist()
corr_df = pd.DataFrame(index=gene_ids,
columns=lnc_ids)
corr_df = corr_df.fillna(0.0)
for x in itertools.product(gene_ids, lnc_ids):
gene_val = expr_df.loc[x[0]].tolist()
# difference in lncRNA expression thresholds may result in some lncRNA
# not being included in final expression tables
try:
lnc_val = expr_df.loc[x[1]].tolist()
corr = crossCorrelate(gene_val, lnc_val, lag=0)
corr_df.loc[x[0], x[1]] = corr
except KeyError:
corr_df.loc[x[0], x[1]] = 0.0
corr_df.to_csv(outfile, sep="\t",
index_label=None)
@P.cluster_runnable
def plotConditionHeatmap(infile, outfile):
'''
    Plot a heatmap of the condition-specific cross-correlations
    between genes and lncRNAs
'''
if infile.endswith("gz"):
comp = "gzip"
else:
comp = None
cor_df = pd.read_table(infile, sep="\t",
header=0, index_col=0,
compression=comp)
# remove duplicate entries
genes = cor_df.index
cor_df['genes'] = genes
cor_df.drop_duplicates(['genes'], inplace=True,
take_last=True)
cor_df.drop(['genes'], inplace=True, axis=1)
plotHeatmap(cor_df, outfile)
def plotHeatmap(dataframe_object, outfile):
'''
plot heatmap with R::heatmap.2
'''
r_matrix = pandas2ri.py2ri_pandasdataframe(dataframe_object)
R.assign("mat.vals", r_matrix)
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
R('''suppressPackageStartupMessages(library(gplots))''')
R('''hmcol <- colorRampPalette(brewer.pal(9, "PuOr"))(100)''')
R('''png("%(outfile)s", units="in", res=300, '''
'''height=5.3, width=5.3)''' % locals())
R('''heatmap.2(as.matrix(mat.vals), trace="none", dendrogram="none", '''
'''col=hmcol, labRow=F, labCol=F, margins=c(6,6))''')
    R('''dev.off()''')
def correlate_lncsEigen(lncFile, eigenFile, correlation, lag=0):
'''
correlate lncRNA expression against module eigengene expression
'''
# average lncRNA expression across replicates and correlate with eigengene
# expression
if lncFile.split(".")[-1] == "gz":
compression = "gzip"
else:
compression = None
lncs_data = pd.read_table(lncFile, sep="\t",
header=0, index_col=0,
compression=compression)
eigen_data = pd.read_table(eigenFile,
index_col=0,
header=0,
sep="\t",
compression=compression)
corr_df = correlateLncRNAs(lncs_data,
eigen_data,
correlation,
lag)
return corr_df
@P.cluster_runnable
def generateBackground(infiles, outfile):
'''
Output gene list of background genes for GO enrichment
from a list of files.
Requires gene id be in ensembl format
'''
# just select protein-coding genes for background, exclude
# other genes/transcripts that may skew or distort
# final enrichments
gene_set = set()
if type(infiles) != tuple:
if infiles.endswith("gz"):
comp = "gzip"
else:
comp = None
df = pd.read_table(infiles, sep="\t", index_col=0,
header=0, compression=comp)
genes = df.index.tolist()
genes = [x for x in genes if re.search("ENS", x)]
gene_set.update(genes)
else:
for fle in infiles:
if fle.endswith("gz"):
comp = "gzip"
else:
comp = None
df = pd.read_table(fle, sep="\t", index_col=0,
header=0, compression=comp)
genes = df.index.tolist()
genes = [x for x in genes if re.search("ENS", x)]
gene_set.update(genes)
with IOTools.openFile(outfile, "w") as output:
for x in gene_set:
output.write("%s\n" % x)
@P.cluster_runnable
def goEnrichment(gene_set, bg_set, genome, db_ids, outfile, database):
'''
Perform GO enrichment on a single gene_set using the R/Bioconductor
package GOseq. Adjust for gene length only, not expression (later?)
database choices = GO:BP, GO:MF, GO:CC, KEGG
'''
gene_df = pd.read_table(gene_set, sep="\t", header=0, index_col=0)
gene_df['gene_id'] = gene_df.index
pc_genes = [x for x in gene_df['gene_id'] if re.search("ENS", x)]
gene_df = gene_df.loc[pc_genes]
gene_df.drop_duplicates(subset='gene_id', inplace=True, take_last=True)
bg_df = pd.read_table(bg_set, sep="\t", header=0, index_col=0)
bg_df['gene_id'] = bg_df.index
bg_df.drop_duplicates(subset='gene_id', inplace=True, take_last=True)
geneset_r = ro.StrVector([x for x in gene_df.index.tolist()])
bgset_r = ro.StrVector([l for l in bg_df.index.tolist()])
# make a vector of integers for degs
R('''core.de <- c(%s)''' % geneset_r.r_repr())
R('''bg <- c(%s)''' % bgset_r.r_repr())
R('''core.vector <- as.integer(bg%in%core.de)''')
R('''names(core.vector) <- bg''')
# generate pwf and perform GO enrichment
R('''sink(file="sink_file.txt")''')
R('''suppressPackageStartupMessages(library(goseq))''')
R('''source("/ifs/projects/proj036/go_enrichment/GO2term.R")''')
R('''pwf.core <- nullp(core.vector, "%(genome)s", '''
''' "%(db_ids)s", plot.fit=F)''' % locals())
R('''go.core <- goseq(pwf.core, "%(genome)s", '''
'''"%(db_ids)s", use_genes_without_cat=T, '''
'''test.cats=c("%(database)s"))''' % locals())
R('''GO.res <- GO2Term(go.core)''')
R('''sink(file=NULL)''')
go_df = pandas2ri.ri2pandas("GO.res")
go_df.to_csv(outfile, sep="\t", index_label="category")
@P.cluster_runnable
def clusterGOEnrichment(cluster_file, genome, db_ids, label, out_dir):
'''
Perform GO enrichment on genes within each cluster - uses custom R script
and Bioconductor package goseq
database choices = GO:BP, GO:CC, GO:MF, KEGG
'''
R('''sink(file="sink_file.txt")''')
R('''source("/ifs/projects/proj036/go_enrichment/GOseq_analysis.R")''')
R('''go_enrichment(infile="%(cluster_file)s", species="%(genome)s", '''
'''gene_id_db="%(db_ids)s", outfile_header="%(label)s", '''
'''out_dir="%(out_dir)s")''' % locals())
R('''sink(file=NULL)''')
@P.cluster_runnable
def topGO(go_file, expression_file, cluster_file, outfile):
'''
    Calculate fold enrichments for each cluster gene ontology
    enrichment file
'''
go_name = go_file.split("/")[-1].split("_")[1].split("GO")[0]
go_frame = pd.read_table(go_file, sep="\t", index_col=None, header=0)
go_frame.index = go_frame['category']
express = pd.read_table(expression_file, sep="\t",
index_col=0, header=0)
expr_genes = express.index.tolist()
N = len(expr_genes)
if cluster_file.endswith("gz"):
comp = "gzip"
else:
comp = None
clust_df = pd.read_table(cluster_file, sep="\t",
index_col=0, header=None,
compression=comp)
clust_df.columns = ['gene_id', 'cluster']
clusters = set(clust_df['cluster'])
clust_dict = {}
for x in clusters:
clust_dict[x] = 0
for gene in clust_df.index.values:
clust_dict[clust_df.loc[gene]['cluster']] += 1
# calculate fold enrichments for all GO terms
n = int(clust_dict[go_name])
fun = lambda x: round((x['numDEInCat']/float(n))/(x['numInCat']/float(N)),
3)
enrich = go_frame.apply(fun, axis=1)
go_frame['foldEnrichment'] = enrich
go_frame.to_csv(outfile, sep="\t", index_label="category")
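# Worked example (not called by the pipeline) of the fold enrichment used
# in topGO and summariseGO: the proportion of cluster genes annotated to a
# category divided by the proportion of all expressed genes annotated to
# it. All numbers below are hypothetical.
def _example_fold_enrichment():
    '''(20/400) / (200/16000) = 0.05 / 0.0125 = 4-fold enrichment.'''
    numDEInCat = 20     # cluster genes annotated to the category
    numInCat = 200      # expressed genes annotated to the category
    n = 400             # genes in the cluster
    N = 16000           # all expressed genes
    fold = round((numDEInCat/float(n))/(numInCat/float(N)), 3)
    assert fold == 4.0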
@P.cluster_runnable
def summariseGO(list_of_go, expression_file, cluster_file, outfile):
'''
    Summarise gene ontology enrichments over a list of clusters
'''
go_dict = {}
for go in list_of_go:
go_name = go.split("/")[-1].split("_")[1].split("GO")[0]
_df = pd.read_table(go, sep="\t", index_col=None, header=0)
_df.index = _df['category']
go_dict[go_name] = _df
express = pd.read_table(expression_file, sep="\t",
index_col=0, header=0)
expr_genes = express.index.tolist()
N = len(expr_genes)
if cluster_file.endswith("gz"):
comp = "gzip"
else:
comp = None
clust_df = pd.read_table(cluster_file, sep="\t",
index_col=0, header=None,
compression=comp)
clust_df.columns = ['gene_id', 'cluster']
clusters = set(clust_df['cluster'])
clust_dict = {}
for x in clusters:
clust_dict[x] = 0
for gene in clust_df.index.values:
clust_dict[clust_df.loc[gene]['cluster']] += 1
cluster_series = pd.Series(clust_dict)
# calculate fold enrichments for all GO terms
for clust in go_dict.keys():
n = int(clust_dict[clust])
func = lambda x: round((x['numDEInCat']/float(n))/(x['numInCat']/float(N)),
3)
df_ = go_dict[clust]
enrich = df_.apply(func, axis=1)
df_['foldEnrichment'] = enrich
go_dict[clust] = df_
# summarise over all GO enrichments in clusters
# take top ten enriched terms from each cluster
top_dict = {}
for each in go_dict.keys():
go_df = go_dict[each]
go_df.sort(columns='padjust', inplace=True, ascending=True)
top_ = go_df.loc[go_df['ont'] == "BP"][0:10]
top_['padjust'] = [float("%0.3g" % x) for x in top_['padjust'].values]
top_dict[each] = top_
# select top enrichment from each cluster for summary table
one_dict = {}
for name in top_dict.keys():
one_df = top_dict[name]
one_series = pd.Series(one_df.iloc[0])
one_dict[name] = one_series
GO_df = pd.DataFrame(one_dict).T
GO_df['cluster'] = GO_df.index.tolist()
GO_df.sort(inplace=True, columns='padjust', ascending=True)
GO_df['padjust'] = [float("%0.3g" % x) for x in GO_df['padjust'].values]
GO_df = pd.merge(GO_df,
pd.concat([GO_df['cluster'], cluster_series], axis=1),
left_index=True, right_index=True)
GO_df = GO_df[['category', 'term', 'padjust', 'foldEnrichment', 'cluster']]
GO_df.sort(columns='padjust', inplace=True, ascending=True)
GO_df.drop(['cluster'], inplace=True, axis=1)
GO_df.to_csv(outfile, sep="\t", index_label="cluster")
@P.cluster_runnable
def classifyLncRNA(lnc_list, lnc_gtf, lnc_class, direction, threshold):
'''
Classify a lncRNA set based on their correlation with protein-coding
genes and positional classification.
e.g. negatively correlated anti-sense lncRNAs or positively
correlated intergenic lncRNAs
Only select multi-exon lncRNAs
'''
if lnc_list.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
cor_df = pd.read_table(lnc_list, sep="\t",
header=0, index_col=None,
compression=comp)
if direction == "positive":
dir_cor = cor_df[cor_df['value'] >= 0]
elif direction == "negative":
dir_cor = cor_df[cor_df['value'] <= 0]
else:
raise AttributeError("Unrecognised correlation direction"
"Please supply positive or negative")
gene_re = re.compile("ENS")
gene_search = sum([1 for lx in dir_cor['gene_id'] if re.search(gene_re, lx)])
lnc_search = sum([1 for gx in dir_cor['lncRNA_id'] if re.search(gene_re, gx)])
if gene_search > lnc_search:
gene_col = 'gene_id'
lnc_col = 'lncRNA_id'
elif gene_search < lnc_search:
gene_col = 'lncRNA_id'
lnc_col = 'gene_id'
else:
raise ValueError('unable to determine gene ID column')
try:
dir_cor.index = dir_cor['set2']
except KeyError:
dir_cor.index = dir_cor[gene_col]
lnc_iter = GTF.transcript_iterator(GTF.iterator(IOTools.openFile(lnc_gtf)))
try:
lnc_cors = set(dir_cor['set1'].tolist())
except KeyError:
try:
lnc_cors = set(dir_cor['lncRNA'].tolist())
except KeyError:
lnc_cors = set(dir_cor[lnc_col].tolist())
try:
gene_ids = set(dir_cor['set2'].tolist())
except KeyError:
gene_ids = set(dir_cor[gene_col].tolist())
lnc_frame = pd.DataFrame(index=gene_ids, columns=['value'])
for trans in lnc_iter:
lnc_id = list(set([x.gene_id for x in trans]))[0]
lnc_source = list(set([x.source for x in trans]))[0]
exon_status = set([x.asDict()['exon_status_locus'] for x in trans])
lnc_status = list(exon_status)[0]
# lncRNAs with exon_status_locs == m are multi-exon lncRNAs
if lnc_source == lnc_class and lnc_status == 'm':
if lnc_id in lnc_cors:
try:
temp_frame = dir_cor[dir_cor['set1'] == lnc_id]
except KeyError:
try:
temp_frame = dir_cor[dir_cor['lncRNA'] == lnc_id]
except KeyError:
temp_frame = dir_cor[dir_cor[lnc_col] == lnc_id]
lnc_frame = lnc_frame.append(temp_frame)
else:
pass
# need to remove duplicate entries and NA's across genes and lncRNAs
not_na = lnc_frame[np.isfinite(lnc_frame['value'])]
try:
not_na = not_na.drop_duplicates(subset=['set2', 'set1', 'value'],
take_last=True)
except KeyError:
try:
not_na = not_na.drop_duplicates(subset=['gene_id', 'lncRNA_id',
'value'],
take_last=True)
except KeyError:
not_na = not_na.drop_duplicates(subset=['gene_id', 'lncRNA',
'value'],
take_last=True)
# drop cluster information if present
try:
not_na.drop(['gene_cluster', 'lncRNA_cluster'],
inplace=True, axis=1)
except ValueError:
pass
# catch bug in number of columns due to column name differences
if len(not_na.columns) > 3:
not_na.drop([gene_col], inplace=True, axis=1)
else:
pass
not_na.columns = [gene_col, lnc_col, 'value']
not_na.index = not_na[gene_col]
not_na = not_na.drop([gene_col], axis=1)
return not_na
def filter_correlation(infile,
threshold):
'''
filter lncRNAs with correlation below threshold
'''
cor_frame = pd.read_table(infile, sep="\t", index_col=0, header=0)
lncs_dict = {}
modules = cor_frame.index.values.tolist()
E.info("filtering correlations at threshold: %0.1f" % threshold)
for lncRNA in cor_frame.columns.values:
mod_dict = {}
for mod in modules:
if abs(cor_frame[lncRNA][mod]) >= threshold:
mod_dict[mod] = cor_frame[lncRNA][mod]
else:
pass
if len(mod_dict) > 0:
lncs_dict[lncRNA] = mod_dict
else:
pass
output_frame = pd.DataFrame(lncs_dict)
output_frame['gene_id'] = output_frame.index
cor_list = pd.melt(output_frame, id_vars='gene_id')
flat_cor = cor_list[cor_list['value'] >= threshold]
flat_cor.index = flat_cor['gene_id']
flat_cor.columns = ['gene_id', 'lncRNA', 'value']
flat_cor.drop(['gene_id'], inplace=True, axis=1)
return flat_cor
def classify_lncrna(infile,
lnc_gtf,
summary_file,
out_gtf):
'''
classify lncRNAs based on the direction of their correlation with
module eigengenes
'''
mod_frame = pd.read_table(infile, sep="\t", index_col=0, header=0)
modules = mod_frame.index.values.tolist()
lnc_file = IOTools.openFile(lnc_gtf, "r")
with open(out_gtf, "w") as gtf_file:
for line in GTF.readFromFile(lnc_file):
entry = GTF.Entry()
if line.gene_id in mod_frame:
entry.copy(line)
for mod in modules:
mod_val = mod_frame[entry.gene_id][mod]
entry.addAttribute(key=mod,
value=str(mod_val))
gtf_file.write("%s\n" % entry)
sources = set()
classification_dict = {}
with open(out_gtf, "r") as new_gtf:
for entry in GTF.readFromFile(new_gtf):
sources.add(entry.source)
for source in sources:
classification_dict[source] = 0
with open(out_gtf, "r") as openFile:
for entry in GTF.readFromFile(openFile):
classification_dict[entry.source] += 1
(pd.Series(classification_dict)).to_csv(summary_file,
sep="\t")
pos_lncs = {}
neg_lncs = {}
with open(out_gtf, "r") as newfile:
for entry in GTF.readFromFile(newfile):
for mod in modules:
try:
assert entry.asDict()[mod]
if float(entry.asDict()[mod]) < 0:
neg_lncs[entry.gene_id] = {'source': entry.source,
mod: 'negative'}
elif float(entry.asDict()[mod]) > 0:
pos_lncs[entry.gene_id] = {'source': entry.source,
mod: 'positive'}
except(KeyError):
pass
pos_series = pd.Series(pos_lncs)
neg_series = pd.Series(neg_lncs)
all_series = pos_series.append(neg_series)
return all_series
def classCorrLncRNA(cluster_file,
gene_express,
lncRNA_express,
lnc_gtf,
threshold,
lncrna_class,
corr_direction):
'''
Classify lncRNAs based on correlation direction with protein
coding gene expression
'''
def correlateDataFrames(df1, df2):
'''
Correlate 2 different dataframes, assuming matching column IDs
but different indexes. Default = Pearson correlation.
'''
# setup indices for dfs
idx1 = df1.index.tolist()
idx2 = df2.index.tolist()
indexes = itertools.product(idx1, idx2)
# create empty correlation dataframe
correlate_frame = pd.DataFrame(index=idx1,
columns=idx2)
        correlate_frame = correlate_frame.fillna(0.0)
for index in indexes:
df1_series = df1.loc[index[0]].values[:-1]
df2_series = df2.loc[index[1]].values
# calculate Pearson correlation using numpy.corrcoef
# np.corrcoef returns correlation matrix - need to index for
# non-identity value
corr = np.corrcoef(df1_series, df2_series)[0][1]
correlate_frame.loc[index[0], index[1]] = corr
return correlate_frame
# for each cluster correlate lncRNAs against protein-coding genes
# use Pearson correlation for now, but what about cross-correlation
# or some sort of temporal correlation measure?
clusters = pd.read_table(cluster_file,
sep="\t",
header=0,
index_col=0)
clusters.columns = ['gene', 'cluster']
cluster_cols = set(clusters['cluster'].tolist())
gene_express = pd.read_table(gene_express,
sep="\t",
index_col=0,
header=0)
lnc_express = pd.read_table(lncRNA_express,
sep="\t",
index_col=0,
header=0)
lnc_express['lncRNA'] = lnc_express.index.tolist()
lnc_dict = {}
lnc_index = GTF.iterator(IOTools.openFile(lnc_gtf))
for lnc in lnc_index:
lnc_dict[lnc.gene_id] = lnc.source
class_frame = pd.DataFrame({'class': lnc_dict})
intergenic_lncs = lnc_express[class_frame['class'] == lncrna_class]
file_prefix = cluster_file.split("/")[1].split("-")[0]
# just pull out the intergenic lncRNAs
correlation_dict = {}
all_correlations = {}
for col in cluster_cols:
# setup dataframe for cluster
col_data = clusters[clusters['cluster'] == col]
col_data.index = col_data['gene']
col_data.drop(['gene'], inplace=True, axis=1)
col_data_genes = col_data.index.tolist()
# setup cluster-specific gene expression dataframe
col_gene_express = gene_express.loc[col_data_genes]
cor_frame = correlateDataFrames(intergenic_lncs,
col_gene_express)
# select lncRNAs on correlation
correlated_lncs = []
if corr_direction == "positive":
for lncrna in cor_frame.index:
if any([True for x in cor_frame.loc[lncrna] if x > threshold]):
correlated_lncs.append(lncrna)
else:
pass
elif corr_direction == "negative":
for lncrna in cor_frame.index:
lnc_correlations = cor_frame.loc[lncrna]
logic_list = [True for x in lnc_correlations if x < -threshold]
if any(logic_list):
correlated_lncs.append(lncrna)
else:
pass
lncs_cor_frame = cor_frame.loc[correlated_lncs]
correlation_dict[col] = correlated_lncs
# write out each correlation matrix to a separate file with cluster ID
# write out list of correlated lncRNA IDs to file
class_dir = "lncRNA_classification.dir"
correlation_out = "%s/%s-%s-%s-correlations.tsv" % (class_dir,
file_prefix,
col,
corr_direction)
lncs_cor_frame.to_csv(correlation_out,
sep="\t")
correlated_lncs_out = "%s/%s-%s-%s-lncRNAs.tsv" % (class_dir,
file_prefix,
col,
corr_direction)
lnc_out = IOTools.openFile(correlated_lncs_out, "w")
for lnc in correlated_lncs:
lnc_out.write("%s\n" % lnc)
lnc_out.close()
all_correlations[col] = cor_frame
# iteratively merge each correlation frame onto the previous one
# use lncRNAs IDs as index/keys
total_frame = pd.concat(all_correlations.values(),
axis=1)
return total_frame
@P.cluster_runnable
def list2GTF(list_of_ids, gtf_file, out_gtf):
'''
Turn a list of gene/lncRNA ids into a gtf file
'''
gtf_it = GTF.transcript_iterator(GTF.iterator(IOTools.openFile(gtf_file)))
with IOTools.openFile(out_gtf, "w") as outfile:
for trans in gtf_it:
for exon in trans:
if exon.gene_id in list_of_ids:
entry = GTF.Entry()
entry = entry.copy(exon)
outfile.write("%s\n" % entry)
else:
pass
def correlationPairs(infile, threshold):
'''
Take a list of gene:lncRNA pairs with correlation coefficients.
    Output gene:lncRNA pairs passing the threshold (>= threshold for
    positive thresholds, <= threshold for negative thresholds).
    Input table is in the format lncRNA_id:gene_id:value
'''
if infile.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
cors_df = pd.read_table(infile, sep="\t", header=0,
compression=comp, index_col=None)
cors_df.columns = ['lncRNA_id', 'gene_id', 'value']
gene_re = re.compile("ENS")
gene_search = sum([1 for lx in cors_df['gene_id'] if re.search(gene_re, lx)])
lnc_search = sum([1 for gx in cors_df['lncRNA_id'] if re.search(gene_re, gx)])
if gene_search > lnc_search:
gene_col = 'gene_id'
lnc_col = 'lncRNA_id'
elif gene_search < lnc_search:
gene_col = 'lncRNA_id'
lnc_col = 'gene_id'
else:
raise ValueError('unable to determine gene ID column')
try:
lnc_set = set(cors_df[lnc_col])
except KeyError:
lnc_set = set(cors_df['lncRNA'])
lncs_nn = {}
idx = 0
for lnc in lnc_set:
try:
l_df = cors_df[cors_df[lnc_col] == lnc]
except KeyError:
l_df = cors_df[cors_df['lncRNA'] == lnc]
if threshold >= 0:
mgene = l_df[l_df['value'] >= threshold]
elif threshold <= 0:
mgene = l_df[l_df['value'] <= threshold]
for xlnc in mgene.index:
lncs_nn[str(idx)] = {'lncRNA_id': lnc,
'gene_id': mgene.loc[xlnc][gene_col],
'value': mgene.loc[xlnc]['value']}
idx += 1
out_frame = pd.DataFrame(lncs_nn).T
out_frame.index = out_frame['gene_id']
out_frame.drop(['gene_id'], axis=1, inplace=True)
return out_frame
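# Illustrative sketch only (not called by the pipeline) of the heuristic
# used in correlationPairs, maxCorrelationPairs and classifyLncRNA to work
# out which column holds Ensembl gene IDs: whichever column contains more
# "ENS"-prefixed identifiers is treated as the gene column. The IDs below
# are toy placeholders.
def _example_detect_gene_column():
    '''Toy demonstration of the ENS-counting column heuristic.'''
    df = pd.DataFrame({'gene_id': ["ENSMUSG00000000001",
                                   "ENSMUSG00000000028"],
                       'lncRNA_id': ["LNC_001", "LNC_002"]})
    gene_re = re.compile("ENS")
    gene_hits = sum([1 for g in df['gene_id'] if re.search(gene_re, g)])
    lnc_hits = sum([1 for l in df['lncRNA_id'] if re.search(gene_re, l)])
    # the gene_id column wins, so it would be used as the gene column
    assert gene_hits > lnc_hits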
def maxCorrelationPairs(infile):
'''
Take a list of gene:lncRNA pairs with correlation coefficients.
Output gene:lncRNA pairs with maximal cross-correlation.
    Input table is in the format lncRNA_id:gene_id:value
'''
if infile.split(".")[-1] == "gz":
comp = "gzip"
else:
comp = None
cors_df = pd.read_table(infile, sep="\t", header=0,
compression=comp, index_col=None)
cors_df.columns = ['lncRNA_id', 'gene_id', 'value']
gene_re = re.compile("ENS")
gene_search = sum([1 for lx in cors_df['gene_id'] if re.search(gene_re, lx)])
lnc_search = sum([1 for gx in cors_df['lncRNA_id'] if re.search(gene_re, gx)])
if gene_search > lnc_search:
gene_col = 'gene_id'
lnc_col = 'lncRNA_id'
elif gene_search < lnc_search:
gene_col = 'lncRNA_id'
lnc_col = 'gene_id'
else:
raise ValueError('unable to determine gene ID column')
try:
lnc_set = set(cors_df[lnc_col])
except KeyError:
lnc_set = set(cors_df['lncRNA'])
lncs_nn = {}
idx = 0
for lnc in lnc_set:
try:
l_df = cors_df[cors_df[lnc_col] == lnc]
except KeyError:
l_df = cors_df[cors_df['lncRNA'] == lnc]
max_cor = max(l_df['value'])
mgene = l_df.loc[l_df['value'] == max_cor, gene_col].values[0]
lncs_nn[str(idx)] = {'lncRNA_id': lnc,
'gene_id': mgene,
'value': max_cor}
idx += 1
out_frame = pd.DataFrame(lncs_nn).T
out_frame.index = out_frame['gene_id']
out_frame.drop(['gene_id'], axis=1, inplace=True)
return out_frame
def testGOCatOverlap(eigen_ids,
correlation_frame,
threshold,
go_terms_dict,
all_terms):
'''
Test significance of overlap for GO enrichment categories
'''
eigen_combs = itertools.combinations(eigen_ids, 2)
fisherpy = R['fisher.test']
q_py = R['p.adjust']
sig_dict = {}
for x in eigen_combs:
if x[0] != x[1] and correlation_frame.loc[x[0]][x[1]] >= threshold:
contingency = np.zeros((2, 2))
g1 = set(go_terms_dict[x[0]])
g2 = set(go_terms_dict[x[1]])
intersect = g1.intersection(g2)
g1_diff = g1.difference(g2)
g2_diff = g2.difference(g1)
all_diff = all_terms.difference(g1).difference(g2)
contingency[0, 0] = len(intersect)
contingency[1, 0] = len(g1_diff)
contingency[0, 1] = len(g2_diff)
contingency[1, 1] = len(all_diff)
f_p = fisherpy(numpy2ri(contingency))
f_py = [list(k) for k in np.array(f_p)]
pvalue = f_py[0][0]
odds = f_py[2][0]
l_ci = f_py[1][0]
u_ci = f_py[1][1]
sig_dict[x] = {'OR': odds,
'pvalue': pvalue,
'lower_ci': l_ci,
'upper_ci': u_ci}
sig_df = pd.DataFrame(sig_dict).T
sig_df['qvalue'] = q_py(sig_df['pvalue'], "BH")
# select clusters for merging with adjusted p < 0.01
to_merge = sig_df[sig_df['qvalue'] < 0.01]
return to_merge
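# Illustrative sketch only (not called by the pipeline) of the 2x2 table
# that testGOCatOverlap passes to R's fisher.test, built from two
# hypothetical clusters' enriched GO term sets g1 and g2 and the universe
# of all enriched terms.
def _example_go_overlap_contingency():
    '''Toy contingency table for the GO category overlap test.'''
    all_terms = set(["GO:%07i" % i for i in range(100)])
    g1 = set(["GO:%07i" % i for i in range(0, 30)])
    g2 = set(["GO:%07i" % i for i in range(20, 50)])
    contingency = np.zeros((2, 2))
    contingency[0, 0] = len(g1.intersection(g2))   # terms shared by both
    contingency[1, 0] = len(g1.difference(g2))     # enriched in g1 only
    contingency[0, 1] = len(g2.difference(g1))     # enriched in g2 only
    contingency[1, 1] = len(all_terms.difference(g1).difference(g2))
    # the four cells partition the term universe
    assert contingency.sum() == len(all_terms)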
def mergeClusters(eigen_file, consensus_file, go_dir, threshold):
'''
Merge clusters based on their functional enrichment and the
correlation of their expression profiles.
'''
name = eigen_file.split("/")[1].split("-")[0]
eigen_df = pd.read_table(eigen_file,
sep="\t",
header=0,
index_col=0)
go_files = [x for x in os.listdir(go_dir) if re.search(name + "-enrich",
x)]
# calculate all eigengene correlations
eigen_cor = pd.DataFrame(index=eigen_df.index, columns=eigen_df.index)
eigen_cor = eigen_cor.fillna(0.0)
eigen_ids = itertools.combinations_with_replacement(eigen_df.index, 2)
for each in eigen_ids:
val1 = eigen_df.loc[each[0]].tolist()
val2 = eigen_df.loc[each[1]].tolist()
corr = tempCorr(val1, val2)
eigen_cor.loc[each[0]][each[1]] = corr
eigen_cor.loc[each[1]][each[0]] = corr
go_terms_dict = {}
all_terms = set()
for fle in go_files:
name = fle.split("_")[1].split(".")[0]
_go_df = pd.read_table(go_dir + fle, sep="\t", header=0, index_col=1)
        _go_df.sort(columns='foldEnrichment', ascending=False, inplace=True)
go_terms_dict[name] = _go_df[_go_df['padjust'] < 0.01].index.tolist()
all_terms.update(_go_df.index.tolist())
# test for statistically significant overlap in GO categories
# between cluster GO enrichments with Fisher's exact test
# only test overlap for clusters with correlation >= threshold
sig_df = testGOCatOverlap(eigen_ids=eigen_df.index,
correlation_frame=eigen_cor,
threshold=threshold,
go_terms_dict=go_terms_dict,
all_terms=all_terms)
return sig_df
def makeSplicedFasta(infile, outfile):
'''
Merge fasta sequences together into a single
spliced transcript sequence
'''
fasta_dict = {}
with IOTools.openFile(infile, "rb") as fafile:
for line in fafile.readlines():
if line[0] == '>':
header = line.rstrip("\n")
fasta_dict[header] = ''
else:
fasta_dict[header] += line.rstrip("\n")
with IOTools.openFile(outfile, "w") as ofile:
for key, value in fasta_dict.items():
ofile.write("%s\n%s\n" % (key, value))
def targetScanWrapper(miRNA_file, target_file, outfile):
'''
Python wrapper for MRE prediction by targetScan
'''
    # targetscan_60.pl must be in the current working directory
assert os.path.exists("targetscan_60.pl")
job_options = "-l mem_free=4G"
statement = '''
perl targetscan_60.pl %(miRNA_file)s %(target_file)s %(outfile)s
> %(outfile)s.log'''
P.run()
def clusterSummary(list_of_files, outfile):
'''
Generate a summary table from consensus clustering
'''
    # summary per input file: condition, reference gene set and
    # median cluster size
file_dict = {}
for fle in list_of_files:
fname = fle.split("/")[-1]
condition = fname.split("-")[0]
ref = fname.split("-")[1] + "gtf.gz"
df_ = pd.read_table(fle, sep="\t", header=0, index_col=0)
df_.columns = ["gene_id", "cluster"]
clust_dict = {}
for idx in df_.index:
cluster = df_.loc[idx]['cluster']
try:
clust_dict[cluster] += 1
except KeyError:
clust_dict[cluster] = 1
        med_size = np.median(clust_dict.values())
file_dict[fname] = {'condition': condition,
'reference': ref,
'median_cluster_size': med_size}
outframe = pd.DataFrame(file_dict).T
outframe.to_csv(outfile, sep="\t", index_label="input_file")
@P.cluster_runnable
def plotClusterHeatmaps(eigengenes, expression, clusters, image_dir):
'''
Generate a plot of expression for each cluster with matching
eigengene expression
'''
if expression.endswith("gz"):
expr_comp = "gzip"
else:
expr_comp = None
if eigengenes.endswith("gz"):
eigen_comp = "gzip"
else:
eigen_comp = None
if clusters.endswith("gz"):
clust_comp = "gzip"
else:
clust_comp = None
expr = pd.read_table(expression, sep="\t",
header=0, index_col=0,
compression=expr_comp)
clust_df = pd.read_table(clusters, sep="\t",
header=None, index_col=0,
compression=clust_comp)
clust_df.columns = ['gene_id', 'cluster']
eigens = pd.read_table(eigengenes, sep="\t",
header=0, index_col=0,
compression=eigen_comp)
mg = mygene.MyGeneInfo()
condition = eigengenes.split("/")[-1].split("-")[0]
reference = eigengenes.split("/")[-1].split("-")[2]
all_clusts = set(clust_df['cluster'])
for clust in all_clusts:
genes = clust_df[clust_df['cluster'] == clust]['gene_id'].tolist()
gene_expr = expr.loc[genes]
clust_eigen = eigens.loc[clust]
if reference == "refcoding":
# get gene symbols - if missing replace with ensembl ID
mg_out = mg.querymany(genes, scopes="ensemblgene", fields="symbol",
species="mouse", returnall=True)['out']
sym_df = pd.DataFrame(mg_out)
sym_df.index = sym_df['query']
c_df = pd.merge(left=gene_expr, right=sym_df, how='inner',
left_index=True, right_index=True)
# get notfound IDs and replace with ensembl
try:
nf_df = c_df.loc[c_df['notfound'] == True]
nf_df['symbol'] = nf_df['query']
c_df.loc[nf_df.index] = nf_df
c_df.drop(['_id', 'notfound', 'query'], inplace=True, axis=1)
except KeyError:
c_df.drop(['_id', 'query'], inplace=True, axis=1)
# drop extraneous columns and remove duplicate entries based on
# gene symbol
c_df.index = c_df['symbol']
c_df.drop_duplicates(subset=['symbol'],
take_last=True, inplace=True)
c_df.drop(['symbol'], inplace=True, axis=1)
c_ids = c_df.index
else:
c_df = gene_expr
c_ids = gene_expr.index
# push objects into R and plot heatmaps
r_ids = ro.StrVector([rs for rs in c_ids])
r_clust = pandas2ri.py2ri_pandasdataframe(c_df)
r_eigen = ro.FloatVector([fe for fe in clust_eigen.values])
R.assign("gnames", r_ids)
R.assign("gexprs", r_clust)
R.assign("geigen", r_eigen)
# plot heatmaps
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
R('''colnames(gexprs) <- c(0,1,3,6,12,24,48,72,96,120)''')
R('''rownames(gexprs) <- gnames''')
# create color vector proportional to eigengene expression
R('''eigen_func <- colorRampPalette(brewer.pal(9, "BuPu"))''')
R('''eigen_col <- eigen_func(length(unique(geigen'''
''')))[as.factor(-1*geigen)]''')
R('''hmcol <- colorRampPalette(brewer.pal(9, "BuPu"))(100)''')
outfile = "-".join([condition, reference,
clust, "expression_heatmap.png"])
outfile = image_dir + "/" + outfile
# output to png device
R('''png('%s', height=480, width=480)''' % outfile)
R('''heatmap.2(as.matrix(gexprs), trace="none", col=hmcol,'''
'''dendrogram="none",density.info="none", ColSideColors=eigen_col,'''
'''margins=c(6,12), cexCol=2.0, labRow=F, Colv=colnames(gexprs))''')
R('''dev.off()''')
if reference == "refcoding":
            # strip the "_heatmap.png" suffix explicitly (rstrip removes a
            # character set, not a suffix)
            txt_file = re.sub(r"_heatmap\.png$", "", outfile)
            txt_file = txt_file + "_gene_symbols.tsv"
# output file with gene symbol and ensembl IDs
out_df = sym_df['symbol']
out_df.columns = ['ensembl']
out_df.to_csv(txt_file, sep="\t",
index_label="gene_symbol")
else:
            txt_file = re.sub(r"_heatmap\.png$", "", outfile)
            txt_file = txt_file + "_lncRNA_ids.tsv"
with open(txt_file, "w") as ofile:
for lnc in c_df.index:
ofile.write("%s\n" % lnc)
@P.cluster_runnable
def plotEigenHeatmaps(eigengenes, image_dir):
'''
Plot a heatmap of eigengene correlations
'''
if eigengenes.endswith("gz"):
comp = "gzip"
else:
comp = None
cor_df = pd.read_table(eigengenes, sep="\t",
index_col=0, header=0,
compression=comp)
cols = cor_df.columns
rows = cor_df.index
# push into R environment for plotting
r_cols = ro.StrVector([rc for rc in cols])
r_rows = ro.StrVector([rr for rr in rows])
r_df = pandas2ri.py2ri_pandasdataframe(cor_df)
R.assign("r.cols", r_cols)
R.assign("r.rows", r_rows)
R.assign("cor.mat", r_df)
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
cond = eigengenes.split("/")[-1].split("-")[0]
ofile = "-".join([cond, "eigengene-correlation-heatmap.png"])
ofile = "/".join([image_dir, ofile])
R('''rownames(cor.mat) <- r.rows''')
R('''colnames(cor.mat) <- r.cols''')
R('''hmcol <- colorRampPalette(brewer.pal(9, "PuOr"))(100)''')
R('''png('%s', height=480, width=480)''' % ofile)
R('''heatmap.2(as.matrix(cor.mat), trace="none", col=hmcol,'''
'''dendrogram="none", density.info="none")''')
R('''dev.off()''')
@P.cluster_runnable
def mirEnrichment(cerna_file,
mre_file,
pairs_gtf,
mirna_file):
'''
Test for enrichment of specific miRNAs amongst ceRNAs and partner
gene 3' UTRs
Requirements:
    * .gtf file of MREs
    * .gtf file of the lncRNA:gene pairs used to build the MRE catalog
    * file of miRNA IDs, one per line
'''
if cerna_file.endswith("gz"):
cerna_comp = "gzip"
else:
cerna_comp = None
cerna_df = pd.read_table(cerna_file, sep="\t", header=0,
index_col=0,
compression=cerna_comp)
catalog = getMREs(mre_file, pairs_gtf)
mirnas = set()
with IOTools.openFile(mirna_file, "rb") as ofile:
for line in ofile.readlines():
mirnas.add(line.rstrip("\n"))
fisherpy = R["fisher.test"]
padjpy = R["p.adjust"]
results = []
pvalues = []
lmirs = {}
gmirs = {}
lnc_seen = set()
gene_seen = set()
# create dicts of ceRNA miRs and partner miRs
for idx in cerna_df.index:
gene = cerna_df.loc[idx]['gene_id']
lnc = cerna_df.loc[idx]['lncRNA_id']
if gene not in gene_seen:
gmirs[gene] = [g for g in catalog[gene]]
else:
pass
if lnc not in lnc_seen:
lmirs[lnc] = [l for l in catalog[lnc]]
else:
pass
        lnc_seen.add(lnc)
        gene_seen.add(gene)
# generate contingency tables and test enrichment
# of each miRNA
for mir in mirnas:
contingency = np.zeros((2, 2))
for lnc in lmirs.keys():
if mir in lmirs[lnc]:
contingency[0, 0] += 1
elif mir not in lmirs[lnc]:
contingency[1, 0] += 1
for gene in gmirs.keys():
if mir in gmirs[gene]:
contingency[0, 1] += 1
elif mir not in gmirs[gene]:
contingency[1, 1] += 1
# run Fisher's exact test in R
f = fisherpy(numpy2ri.numpy2ri(contingency), alternative="greater")
# get overlap numbers
ncerna = contingency[0, 0]
npartners = contingency[0, 1]
tcerna = contingency[0, 0] + contingency[1, 0]
tpartners = contingency[0, 1] + contingency[1, 1]
# convert fishers back to python
fx = [list(x) for x in np.array(f)]
# fisher.test returns pval, CIs, OR
pvalue, ci_low, ci_hi, OR = fx[0][0], fx[1][0], fx[1][1], fx[2][0]
pvalues.append(pvalue)
# set default OR to 1
if (ncerna + npartners == 0 or
(ncerna == tcerna and npartners == tpartners)):
OR = 1
results.append([mir, OR, ci_low, ci_hi, pvalue, ncerna,
npartners, tcerna, tpartners])
qvalues = padjpy(pvalues)
for i in range(len(results)):
yield results[i], [qvalues[i]]
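# For reference, the one-sided test on the 2x2 table built above can also be
# run without the rpy2 round trip. This is an illustrative sketch, not part of
# the original pipeline; it assumes scipy is available and the counts are
# invented.
def fisherSketch():
    from scipy.stats import fisher_exact
    # rows: miRNA present / absent; columns: ceRNA 3' UTRs / partner 3' UTRs
    table = np.array([[12, 30],
                      [88, 170]])
    odds_ratio, pvalue = fisher_exact(table, alternative="greater")
    return odds_ratio, pvalue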
def runSailfishIndex(fasta_file, outdir, threads,
kmer):
'''
Wrapper for sailfish index
'''
if fasta_file.endswith(".fa"):
pass
elif fasta_file.endswith(".fasta"):
pass
else:
E.warn("are you sure this is a fasta file?")
command = '''
sailfish index --transcripts %s --out %s --threads %i --kmerSize %i
''' % (fasta_file, outdir, threads, kmer)
os.system(command)
def runSailfishQuant(fasta_index, fastq_files, output_dir,
paired=False, library="ISF", threads=4,
gene_gtf=None):
'''
Wrapper for sailfish quant command
'''
decompress = False
if len(fastq_files) > 1:
if fastq_files[0].endswith(".gz"):
decompress = True
else:
pass
else:
if fastq_files[0].endswith(".gz"):
decompress = True
else:
pass
# check output directory is an absolute path
if os.path.isabs(output_dir):
pass
else:
        output_dir = os.path.abspath(output_dir)
states = []
command = " sailfish quant --index %s -l %s -o %s " % (fasta_index,
library,
output_dir)
states.append(command)
if threads:
states.append(" --threads %i " % threads)
else:
pass
if gene_gtf:
states.append(" --geneMap %s " % gene_gtf)
else:
pass
# sailfish does not handle compress files natively,
# need to decompress on the fly with advanced
# bash syntax
if decompress and paired:
first_mates = tuple([fq for fq in fastq_files if re.search("fastq.1.gz", fq)])
fstr_format = " ".join(["%s" for hq in first_mates])
fdecomp_format = fstr_format % first_mates
decomp_first = " -1 <(zcat %s)" % fdecomp_format
states.append(decomp_first)
second_mates = tuple([sq for sq in fastq_files if re.search("fastq.2.gz", sq)])
sstr_format = " ".join(["%s" for aq in second_mates])
sdecomp_format = sstr_format % second_mates
decomp_second = " -2 <(zcat %s)" % sdecomp_format
states.append(decomp_second)
elif decompress and not paired:
first_mates = tuple([fq for fq in fastq_files if re.search("fastq.1.gz", fq)])
fstr_format = " ".join(["%s" for sq in first_mates])
fdecomp_format = fstr_format % first_mates
decomp_first = " -r <(zcat %s)" % fdecomp_format
states.append(decomp_first)
elif paired and not decompress:
first_mates = tuple([fq for fq in fastq_files if re.search("fastq.1", fq)])
fstr_format = " ".join(["%s" for sq in first_mates])
fdecomp_format = fstr_format % first_mates
decomp_first = " -1 %s " % fdecomp_format
states.append(decomp_first)
second_mates = tuple([sq for sq in fastq_files if re.search("fastq.2", sq)])
sstr_format = " ".join(["%s" for aq in second_mates])
sdecomp_format = sstr_format % second_mates
decomp_second = " -2 %s " % sdecomp_format
states.append(decomp_second)
statement = " ".join(states)
# subprocess cannot handle process substitution
# therefore needs to be wrapped in /bin/bash -c '...'
# for bash to interpret the substitution correctly
process = subprocess.Popen(statement, shell=True,
executable="/bin/bash")
stdout, stderr = process.communicate()
if process.returncode != 0:
raise OSError(
"-------------------------------------------\n"
"Child was terminated by signal %i: \n"
"The stderr was \n%s\n%s\n"
"-------------------------------------------" %
(-process.returncode, stderr, statement))
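# The comments above are the key design point: sailfish wants plain fastq on
# its -1/-2/-r arguments, so gzipped input is streamed through bash process
# substitution, and the assembled statement must therefore be executed by
# /bin/bash rather than the default /bin/sh. A minimal, self-contained
# illustration of that pattern (the file name is hypothetical):
def processSubstitutionSketch():
    statement = "wc -l <(zcat sample.fastq.1.gz)"
    process = subprocess.Popen(statement, shell=True,
                               executable="/bin/bash")
    process.communicate()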
def runKallistoIndex(fasta_file, outfile, kmer=31):
'''
Wrapper for kallisto index
'''
if fasta_file.endswith(".fa"):
pass
    elif fasta_file.endswith(".fasta"):
pass
else:
E.warn("are you sure this is a fasta file?")
command = "kallisto index --index=%s %s" % (outfile,
fasta_file)
os.system(command)
def runKallistoQuant(fasta_index, fastq_files, output_dir,
bias=False, bootstrap=None,
seed=1245, threads=None, plaintext=False):
'''
Wrapper for kallisto quant command
'''
if len(fastq_files) > 1:
fastqs = " ".join(fastq_files)
else:
        fastqs = fastq_files[0]
# check output directory is an absolute path
if os.path.isabs(output_dir):
pass
else:
        output_dir = os.path.abspath(output_dir)
states = []
command = " kallisto quant --index=%s --output-dir=%s" % (fasta_index,
output_dir)
states.append(command)
if bias:
states.append(" --use-bias ")
else:
pass
if bootstrap:
states.append(" --bootstrap=%i --seed=%i " % (bootstrap,
seed))
else:
pass
if plaintext:
states.append(" --plaintext ")
else:
pass
if threads:
states.append(" --threads=%i " % threads)
else:
pass
states.append(" %s " % fastqs)
statement = " ".join(states)
# need to rename output files to conform to input/output
# pattern as required. Default name is abundance*.txt
# when using plaintext output
    # kallisto requires an output directory - create many small
# directories, one for each file.
# then extract the abundance.txt file and rename using the
# input/output pattern
os.system(statement)
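# Hypothetical end-to-end calls for the two quantification wrappers above; the
# index, fastq and gtf paths are placeholders. Shown only to illustrate the
# expected argument shapes (lists of fastq files, paired/bootstrap flags).
def quantUsageSketch():
    runSailfishQuant("sailfish.index",
                     ["sample.fastq.1.gz", "sample.fastq.2.gz"],
                     "sailfish_out", paired=True, library="ISF",
                     threads=4, gene_gtf="genes.gtf")
    runKallistoQuant("kallisto.idx",
                     ["sample.fastq.1", "sample.fastq.2"],
                     "kallisto_out", bootstrap=100, threads=4)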
| mit |
mitdrc/director | src/python/director/planplayback.py | 2 | 7145 | import os
import vtkAll as vtk
import math
import time
import re
import numpy as np
from director.timercallback import TimerCallback
from director import objectmodel as om
from director.simpletimer import SimpleTimer
from director.utime import getUtime
from director import robotstate
import pickle
import scipy.interpolate
def asRobotPlan(msg):
'''
If the given message is a robot_plan_with_supports_t then this function returns
the plan message contained within it. For any other message type, this function
just returns its input argument.
'''
try:
import drc as lcmdrc
except ImportError:
pass
else:
if isinstance(msg, lcmdrc.robot_plan_with_supports_t):
return msg.plan
return msg
class PlanPlayback(object):
def __init__(self):
self.animationCallback = None
self.animationTimer = None
self.interpolationMethod = 'slinear'
self.playbackSpeed = 1.0
self.jointNameRegex = ''
@staticmethod
def getPlanPoses(msgOrList):
if isinstance(msgOrList, list):
messages = msgOrList
allPoseTimes, allPoses = PlanPlayback.getPlanPoses(messages[0])
for msg in messages[1:]:
poseTimes, poses = PlanPlayback.getPlanPoses(msg)
poseTimes += allPoseTimes[-1]
allPoseTimes = np.hstack((allPoseTimes, poseTimes[1:]))
allPoses += poses[1:]
return allPoseTimes, allPoses
else:
msg = asRobotPlan(msgOrList)
poses = []
poseTimes = []
for plan in msg.plan:
pose = robotstate.convertStateMessageToDrakePose(plan)
poseTimes.append(plan.utime / 1e6)
poses.append(pose)
return np.array(poseTimes), poses
@staticmethod
def getPlanElapsedTime(msg):
msg = asRobotPlan(msg)
startTime = msg.plan[0].utime
endTime = msg.plan[-1].utime
return (endTime - startTime) / 1e6
def stopAnimation(self):
if self.animationTimer:
self.animationTimer.stop()
    def setInterpolationMethod(self, method):
self.interpolationMethod = method
def playPlan(self, msg, jointController):
self.playPlans([msg], jointController)
def playPlans(self, messages, jointController):
assert len(messages)
poseTimes, poses = self.getPlanPoses(messages)
self.playPoses(poseTimes, poses, jointController)
def getPoseInterpolatorFromPlan(self, message):
poseTimes, poses = self.getPlanPoses(message)
return self.getPoseInterpolator(poseTimes, poses)
def getPoseInterpolator(self, poseTimes, poses, unwrap_rpy=True):
if unwrap_rpy:
poses = np.array(poses, copy=True)
poses[:,3:6] = np.unwrap(poses[:,3:6],axis=0)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.pchip(poseTimes, poses, axis=0)
return f
def getPlanPoseMeshes(self, messages, jointController, robotModel, numberOfSamples):
poseTimes, poses = self.getPlanPoses(messages)
f = self.getPoseInterpolator(poseTimes, poses)
sampleTimes = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
meshes = []
for sampleTime in sampleTimes:
pose = f(sampleTime)
jointController.setPose('plan_playback', pose)
polyData = vtk.vtkPolyData()
robotModel.model.getModelMesh(polyData)
meshes.append(polyData)
return meshes
def showPoseAtTime(self, time, jointController, poseInterpolator):
pose = poseInterpolator(time)
jointController.setPose('plan_playback', pose)
def playPoses(self, poseTimes, poses, jointController):
f = self.getPoseInterpolator(poseTimes, poses)
timer = SimpleTimer()
def updateAnimation():
tNow = timer.elapsed() * self.playbackSpeed
if tNow > poseTimes[-1]:
pose = poses[-1]
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
return False
pose = f(tNow)
jointController.setPose('plan_playback', pose)
if self.animationCallback:
self.animationCallback()
self.animationTimer = TimerCallback()
self.animationTimer.targetFps = 60
self.animationTimer.callback = updateAnimation
self.animationTimer.start()
updateAnimation()
def picklePlan(self, filename, msg):
poseTimes, poses = self.getPlanPoses(msg)
pickle.dump((poseTimes, poses), open(filename, 'w'))
def getMovingJointNames(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
return jointNames
def plotPlan(self, msg):
poseTimes, poses = self.getPlanPoses(msg)
self.plotPoses(poseTimes, poses)
def plotPoses(self, poseTimes, poses):
import matplotlib.pyplot as plt
poses = np.array(poses)
if self.jointNameRegex:
jointIds = range(poses.shape[1])
else:
diffs = np.diff(poses, axis=0)
jointIds = np.unique(np.where(diffs != 0.0)[1])
jointNames = [robotstate.getDrakePoseJointNames()[jointId] for jointId in jointIds]
jointTrajectories = [poses[:,jointId] for jointId in jointIds]
seriesNames = []
sampleResolutionInSeconds = 0.01
numberOfSamples = (poseTimes[-1] - poseTimes[0]) / sampleResolutionInSeconds
xnew = np.linspace(poseTimes[0], poseTimes[-1], numberOfSamples)
fig = plt.figure()
ax = fig.add_subplot(111)
for jointId, jointName, jointTrajectory in zip(jointIds, jointNames, jointTrajectories):
if self.jointNameRegex and not re.match(self.jointNameRegex, jointName):
continue
x = poseTimes
y = jointTrajectory
y = np.rad2deg(y)
if self.interpolationMethod in ['slinear', 'quadratic', 'cubic']:
f = scipy.interpolate.interp1d(x, y, kind=self.interpolationMethod)
elif self.interpolationMethod == 'pchip':
f = scipy.interpolate.pchip(x, y)
ax.plot(x, y, 'ko')
seriesNames.append(jointName + ' points')
ax.plot(xnew, f(xnew), '-')
seriesNames.append(jointName + ' ' + self.interpolationMethod)
ax.legend(seriesNames, loc='upper right').draggable()
ax.set_xlabel('time (s)')
ax.set_ylabel('joint angle (deg)')
ax.set_title('joint trajectories')
plt.show()
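# Illustrative sketch, not part of the original module: the interpolation
# scheme used by PlanPlayback.getPoseInterpolator, reduced to a toy
# single-joint trajectory. 'slinear' gives piecewise-linear poses, while
# 'pchip' gives a smooth, overshoot-free curve through the same knots.
def interpolationSketch():
    poseTimes = np.array([0.0, 0.5, 1.0, 2.0])
    poses = np.array([[0.0], [0.3], [0.9], [1.0]])
    fLinear = scipy.interpolate.interp1d(poseTimes, poses, axis=0, kind='slinear')
    fPchip = scipy.interpolate.pchip(poseTimes, poses, axis=0)
    t = 0.75
    return fLinear(t), fPchip(t)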
| bsd-3-clause |
fierval/retina | plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
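# Quick illustrative check of the note in the module docstring (added here for
# illustration, not part of the original scikit-learn example): MDS tries to
# preserve pairwise distances, so the embedded distances should correlate
# strongly with the original high-dimensional ones. Y still holds the MDS
# embedding at this point.
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
d_high = euclidean_distances(X)
d_low = euclidean_distances(Y)
print("MDS distance correlation: %.3f"
      % np.corrcoef(d_high.ravel(), d_low.ravel())[0, 1])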
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| mit |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy/lib/npyio.py | 42 | 71218 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
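# Illustrative sketch (not part of the original numpy sources): because _savez
# stages each array as a temporary .npy file and adds it to a zip archive, the
# resulting .npz can be inspected with the standard zipfile module.
def _npz_layout_sketch(path):
    import zipfile
    savez(path, a=np.arange(3), b=np.eye(2))
    if not path.endswith('.npz'):
        path = path + '.npz'
    with zipfile.ZipFile(path) as zf:
        return zf.namelist()  # e.g. ['a.npy', 'b.npy']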
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        if b'0x' in x.lower():
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
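# Illustrative sketch (not part of the original numpy sources) of the
# converters returned above: loadtxt hands each field to one of these, so
# integer columns tolerate a trailing ".0" and float columns accept C99 hex
# literals.
def _getconv_sketch():
    assert _getconv(np.dtype(np.float64))(b'0x1.8p1') == 3.0
    assert _getconv(np.dtype(np.int32))(b'4.0') == 4
    assert _getconv(np.dtype(np.bool_))(b'1') is True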
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
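# Illustrative sketch (not part of the original numpy sources) of the note in
# the loadtxt docstring: strings produced by float.hex() round-trip through
# loadtxt because the float converter falls back to float.fromhex for
# '0x...' fields.
def _loadtxt_hex_sketch():
    from io import BytesIO
    text = asbytes("%s %s\n" % ((0.5).hex(), (2.0).hex()))
    return loadtxt(BytesIO(text))  # array([ 0.5,  2. ])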
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
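# Illustrative sketch (not part of the original numpy sources) of option c)
# in the `fmt` documentation above: for complex input, each column gets its
# own specifier covering the real and imaginary parts.
def _savetxt_complex_sketch(fname):
    z = np.array([[1 + 2j, 3 - 4j],
                  [5 + 6j, 7 - 8j]])
    savetxt(fname, z, fmt=['%.3e%+.3ej', '(%.15e%+.15ej)'])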
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
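# A minimal usage sketch for the `max_rows` and `invalid_raise` keywords
# documented above; the byte string stands in for a file on disk and is an
# illustration only, not part of the library.
def _genfromtxt_usage_sketch():
    from io import BytesIO
    raw = b"1,2,3\n4,5,6\n7,8\n9,10,11"
    # Stop after two rows; the malformed third line is never reached.
    head = genfromtxt(BytesIO(raw), delimiter=",", max_rows=2)
    # Warn about (and skip) the line with a missing column instead of raising.
    lenient = genfromtxt(BytesIO(raw), delimiter=",", invalid_raise=False)
    return head, lenient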
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
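# A short sketch contrasting the convenience wrappers above; the in-memory
# byte string and field names are made up for illustration.
def _fromtxt_wrappers_sketch():
    from io import BytesIO
    raw = b"A,B\n1,2.5\n3,4.5"
    plain = ndfromtxt(BytesIO(raw), delimiter=",", names=True)     # plain ndarray
    records = recfromtxt(BytesIO(raw), delimiter=",", names=True)  # recarray
    lowered = recfromcsv(BytesIO(raw))                             # names lower-cased
    return plain, records, lowered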
| bsd-2-clause |
jreback/pandas | pandas/io/sql.py | 1 | 68788 | """
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from contextlib import contextmanager
from datetime import date, datetime, time
from functools import partial
import re
from typing import Any, Dict, Iterator, List, Optional, Sequence, Union, cast, overload
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import DtypeArg
from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(
col, utc: Optional[bool] = None, format: Optional[Union[str, Dict[str, Any]]] = None
):
if isinstance(format, dict):
# GH35185 Allow custom error values in parse_dates argument of
# read_sql like functions.
# Format can take on custom to_datetime argument values such as
# {"errors": "coerce"} or {"dayfirst": True}
error = format.pop("errors", None) or "ignore"
return to_datetime(col, errors=error, **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
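# A small sketch of the `format` variants handled above; the Series contents
# are illustrative only.
def _handle_date_column_sketch():
    strings = Series(["2021-01-02", "not-a-date"])
    coerced = _handle_date_column(strings)                          # invalid -> NaT
    ignored = _handle_date_column(strings, format={"errors": "ignore"})
    epoch = _handle_date_column(Series([1609459200]), format="s")   # unit-based parse
    return coerced, ignored, epoch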
def _parse_date_columns(data_frame, parse_dates):
"""
    Force the columns listed in `parse_dates` to be parsed as datetimes.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(
data,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: Optional[DtypeArg] = None,
):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
if dtype:
frame = frame.astype(dtype)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
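# A quick sketch of `execute` with a plain sqlite3 DBAPI connection; the
# table name is illustrative.
def _execute_usage_sketch():
    import sqlite3
    conn = sqlite3.connect(":memory:")
    execute("CREATE TABLE t (x INTEGER)", conn)
    execute("INSERT INTO t VALUES (?)", conn, params=(1,))
    return execute("SELECT x FROM t", conn).fetchall()  # [(1,)]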
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name: str,
con,
schema: Optional[str] = None,
index_col: Optional[Union[str, Sequence[str]]] = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
A database URI could be provided as str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
        A SQL table is returned as a two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
"read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError as err:
raise ValueError(f"Table {table_name} not found") from err
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
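# A compact sketch of `read_sql_table`; it assumes SQLAlchemy is installed,
# since this function rejects plain DBAPI connections. Table and column
# names are illustrative.
def _read_sql_table_sketch():
    from sqlalchemy import create_engine
    engine = create_engine("sqlite:///:memory:")
    to_sql(DataFrame({"x": [1, 2], "y": ["a", "b"]}), "demo", engine, index=False)
    return read_sql_table("demo", engine, columns=["x"])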
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: None = None,
dtype: Optional[DtypeArg] = None,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: int = 1,
dtype: Optional[DtypeArg] = None,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col=None,
coerce_float: bool = True,
params=None,
parse_dates=None,
chunksize: Optional[int] = None,
dtype: Optional[DtypeArg] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
        {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
.. versionadded:: 1.3.0
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
dtype=dtype,
)
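# A minimal sketch of `read_sql_query` against an in-memory sqlite3 database;
# table and column names are made up for illustration.
def _read_sql_query_sketch():
    import sqlite3
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (id INTEGER, stamp TEXT)")
    conn.execute("INSERT INTO events VALUES (1, '2021-01-02')")
    return read_sql_query(
        "SELECT id, stamp FROM events WHERE id = ?",
        conn,
        params=(1,),
        parse_dates=["stamp"],
    )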
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col: Optional[Union[str, Sequence[str]]] = None,
coerce_float: bool = True,
params=None,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable; str
connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
        E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
Read data from SQL via either a SQL query or a SQL tablename.
    When using a SQLite database, only SQL queries are accepted;
providing only the SQL tablename will result in an error.
>>> from sqlite3 import connect
>>> conn = connect(':memory:')
>>> df = pd.DataFrame(data=[[0, '10/11/12'], [1, '12/11/10']],
... columns=['int_column', 'date_column'])
>>> df.to_sql('test_data', conn)
>>> pd.read_sql('SELECT int_column, date_column FROM test_data', conn)
int_column date_column
0 0 10/11/12
1 1 12/11/10
>>> pd.read_sql('test_data', 'postgres:///db_name') # doctest:+SKIP
Apply date parsing to columns through the ``parse_dates`` argument
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates=["date_column"])
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
The ``parse_dates`` argument calls ``pd.to_datetime`` on the provided columns.
Custom argument values for applying ``pd.to_datetime`` on a column are specified
via a dictionary format:
1. Ignore errors while parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"errors": "ignore"}})
int_column date_column
0 0 2012-10-11
1 1 2010-12-11
2. Apply a dayfirst date parsing order on the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"dayfirst": True}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
3. Apply custom formatting when date parsing the values of "date_column"
>>> pd.read_sql('SELECT int_column, date_column FROM test_data',
... conn,
... parse_dates={"date_column": {"format": "%d/%m/%y"}})
int_column date_column
0 0 2012-11-10
1 1 2010-11-12
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
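# A short sketch of chunked reading via the `chunksize` keyword documented
# above; the table name and sizes are illustrative.
def _read_sql_chunked_sketch():
    import sqlite3
    conn = sqlite3.connect(":memory:")
    to_sql(DataFrame({"x": range(10)}), "demo", conn, index=False)
    chunks = read_sql("SELECT x FROM demo", conn, chunksize=4)
    return sum(chunk["x"].sum() for chunk in chunks)  # 45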
def to_sql(
frame,
name: str,
con,
schema: Optional[str] = None,
if_exists: str = "fail",
index: bool = True,
index_label=None,
chunksize: Optional[int] = None,
dtype: Optional[DtypeArg] = None,
method: Optional[str] = None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- 'multi': Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
)
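# A sketch of the callable form of `method` described above, written against
# the sqlite3 fallback (where the table object provides `insert_statement`);
# the row counting is purely illustrative.
def _insert_with_rowcount(pd_table, conn, keys, data_iter):
    rows = list(data_iter)
    conn.executemany(pd_table.insert_statement(num_rows=1), rows)
    return len(rows)
# e.g. to_sql(frame, "demo", sqlite3_connection, method=_insert_with_rowcount)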
def has_table(table_name: str, con, schema: Optional[str] = None):
"""
Check if DataBase has named table.
Parameters
----------
    table_name : string
        Name of SQL table.
    con : SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
    Return a SQLAlchemy engine from a URI (if con is a string);
    otherwise, return con unmodified.
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, str):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(
con, schema: Optional[str] = None, meta=None, is_cursor: bool = False
):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
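# A tiny sketch of the dispatch above: a DBAPI connection gets the sqlite
# fallback wrapper, while SQLAlchemy connectables (or URI strings, when
# SQLAlchemy is installed) get SQLDatabase.
def _pandassql_builder_sketch():
    import sqlite3
    wrapper = pandasSQL_builder(sqlite3.connect(":memory:"))
    assert isinstance(wrapper, SQLiteDatabase)
    return wrapper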
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
    Uses the fact that the table is reflected by SQLAlchemy to
    do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name: str,
pandas_sql_engine,
frame=None,
index=True,
if_exists="fail",
prefix="pandas",
index_label=None,
schema=None,
keys=None,
dtype: Optional[DtypeArg] = None,
):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys: List[str], data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
def _execute_insert_multi(self, conn, keys: List[str], data_iter):
"""
        Alternative to _execute_insert for DBs that support multivalue INSERT.
Note: multi-value insert is usually faster for analytics DBs
and tables containing a few columns
but performance degrades quickly with increase of columns.
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(data))
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(self, chunksize: Optional[int] = None, method: Optional[str] = None):
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = (nrows // chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
exec_insert(conn, keys, chunk_iter)
def _query_iterator(
self,
result,
        chunksize: Optional[int],
columns,
coerce_float: bool = True,
parse_dates=None,
):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
else:
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return [
l if l is not None else f"level_{i}"
for i, l in enumerate(self.frame.index.names)
]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Column, PrimaryKeyConstraint, Table
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
        # At this point, attach to new metadata; only attach to self.meta
        # once the table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
TIMESTAMP,
BigInteger,
Boolean,
Date,
DateTime,
Float,
Integer,
SmallInteger,
Text,
Time,
)
if col_type == "datetime64" or col_type == "datetime":
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
# GH35076 Map pandas integer to optimal SQLAlchemy integer type
if col.dtype.name.lower() in ("int8", "uint8", "int16"):
return SmallInteger
elif col.dtype.name.lower() in ("uint16", "int32"):
return Integer
elif col.dtype.name.lower() == "uint64":
raise ValueError("Unsigned 64 bit integer datatype is not supported")
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: Optional[DtypeArg] = None,
method=None,
):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema: Optional[str] = None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, "execute"):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execution_options().execute(*args, **kwargs)
def read_table(
self,
table_name: str,
index_col: Optional[Union[str, Sequence[str]]] = None,
coerce_float: bool = True,
parse_dates=None,
columns=None,
schema: Optional[str] = None,
chunksize: Optional[int] = None,
):
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result,
chunksize: int,
columns,
index_col=None,
coerce_float=True,
parse_dates=None,
dtype: Optional[DtypeArg] = None,
):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql: str,
index_col: Optional[str] = None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: Optional[int] = None,
dtype: Optional[DtypeArg] = None,
):
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
dtype : Type name or dict of columns
Data type for data or columns. E.g. np.float64 or
            {'a': np.float64, 'b': np.int32, 'c': 'Int64'}
.. versionadded:: 1.3.0
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
read_sql = read_query
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: Optional[DtypeArg] = None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
        method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype:
if not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
else:
dtype = cast(dict, dtype)
from sqlalchemy.types import TypeEngine, to_instance
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
from sqlalchemy import exc
try:
table.insert(chunksize, method=method)
except exc.SQLAlchemyError as err:
# GH34431
msg = "(1054, \"Unknown column 'inf' in 'field list'\")"
err_text = str(err.orig)
if re.search(msg, err_text):
raise ValueError("inf cannot be used with MySQL") from err
else:
raise err
if not name.isdigit() and not name.islower():
            # check for potential case-sensitivity issues (GH7815)
            # Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema, connection=conn
)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name: str, schema: Optional[str] = None):
return self.connectable.run_callable(
self.connectable.dialect.has_table, name, schema or self.meta.schema
)
def get_table(self, table_name: str, schema: Optional[str] = None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get(".".join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name: str, schema: Optional[str] = None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(
self,
frame: DataFrame,
table_name: str,
keys: Optional[List[str]] = None,
dtype: Optional[DtypeArg] = None,
schema: Optional[str] = None,
):
table = SQLTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
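# --- Illustrative sketch (added by the editor, not part of pandas itself) ---
# The ``to_sql`` method documented above exposes ``if_exists``, ``chunksize``
# and ``method``.  A minimal, hedged usage sketch through the public
# ``DataFrame.to_sql`` API and an in-memory sqlite3 connection (both choices
# are assumptions made for this example only; table and column names are
# invented):
def _demo_to_sql_options():
    import sqlite3

    import pandas as pd

    frame = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    with sqlite3.connect(":memory:") as conn:
        # 'replace' drops any existing table; 'multi' packs several rows into
        # a single INSERT statement, written in chunks of two rows at a time.
        frame.to_sql("demo", conn, if_exists="replace", index=False,
                     chunksize=2, method="multi")
        return pd.read_sql_query("SELECT * FROM demo", conn)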
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows: int):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
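# Note added for illustration: for a frame with columns "a" and "b" and no
# index column, ``insert_statement(num_rows=2)`` produces SQL of the form
#     INSERT INTO "tbl" ("a","b") VALUES (?,?),(?,?)
# which ``_execute_insert_multi`` below passes to ``conn.execute`` together
# with the flattened row data.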
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
def _execute_insert_multi(self, conn, keys, data_iter):
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
pat = re.compile(r"\s+")
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
if self.schema:
schema_name = self.schema + "."
else:
schema_name = ""
create_stmts = [
"CREATE TABLE "
+ schema_name
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype: DtypeArg = self.dtype or {}
if is_dict_like(dtype):
dtype = cast(dict, dtype)
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, is_cursor: bool = False):
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: Optional[DtypeArg] = None,
):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
def read_query(
self,
sql,
index_col=None,
coerce_float: bool = True,
params=None,
parse_dates=None,
chunksize: Optional[int] = None,
dtype: Optional[DtypeArg] = None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype: Optional[DtypeArg] = None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
fail: If table exists, do nothing.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype:
if not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
else:
dtype = cast(dict, dtype)
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
table.insert(chunksize, method)
def has_table(self, name: str, schema: Optional[str] = None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name: str, schema: Optional[str] = None):
return None # not supported in fallback mode
def drop_table(self, name: str, schema: Optional[str] = None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(
self,
frame,
table_name: str,
keys=None,
dtype: Optional[DtypeArg] = None,
schema: Optional[str] = None,
):
table = SQLiteTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
def get_schema(
frame,
name: str,
keys=None,
con=None,
dtype: Optional[DtypeArg] = None,
schema: Optional[str] = None,
):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
schema: str, default: None
Optionally specify the schema to be used in creating the table.
.. versionadded:: 1.2.0
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(
frame, name, keys=keys, dtype=dtype, schema=schema
)
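# Illustrative sketch (added by the editor, not part of pandas itself): the
# ``get_schema`` helper above returns the CREATE TABLE statement pandas would
# emit for a frame.  With ``con=None`` the sqlite3 fallback types from
# ``_SQL_TYPES`` are expected to be used (an assumption about
# ``pandasSQL_builder``, which is defined earlier in this module).  The frame
# and table name below are invented.
def _demo_get_schema():
    import pandas as pd

    frame = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
    return get_schema(frame, "demo_table", keys=["id"])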
| bsd-3-clause |
kylerbrown/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans will work well, even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton.
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
rohanp/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
rahuldhote/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/ensemble/voting_classifier.py | 8 | 8679 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.validation import check_is_fitted
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array-like, shape = [n_predictions]
The classes labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
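# Illustrative sketch (added by the editor, not part of scikit-learn itself):
# the 'soft' vote implemented above is a weighted average of per-classifier
# probabilities followed by an argmax.  A standalone numpy equivalent for two
# classifiers, three samples and two classes (all numbers invented):
def _demo_soft_vote():
    probas = np.asarray([
        [[0.9, 0.1], [0.4, 0.6], [0.2, 0.8]],   # classifier 1
        [[0.6, 0.4], [0.8, 0.2], [0.1, 0.9]],   # classifier 2
    ])
    avg = np.average(probas, axis=0, weights=[2, 1])  # same call as above
    return np.argmax(avg, axis=1)                     # -> array([0, 0, 1])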
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/signal/spectral.py | 10 | 35012 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0018156616014838548
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade-off between accurately estimating
the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
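# The docstring above describes Welch's method as averaging windowed,
# detrended periodograms of overlapping segments.  The function below is a
# minimal NumPy-only sketch of that idea, added purely for illustration (the
# name `_demo_welch_by_hand` is hypothetical and not part of this module).
# It assumes a real 1-D signal, an even `nperseg`, 50% overlap, 'constant'
# detrending and 'density' scaling; it uses NumPy's symmetric Hann window,
# so its values will differ slightly from this module's implementation,
# which uses a periodic window.
def _demo_welch_by_hand(x, fs=1.0, nperseg=256):
    import numpy as np
    x = np.asarray(x, dtype=float)
    win = np.hanning(nperseg)                 # symmetric Hann window
    step = nperseg // 2                       # 50% overlap
    scale = 1.0 / (fs * (win * win).sum())    # 'density' scaling
    n_segs = (x.shape[-1] - nperseg) // step + 1
    psds = []
    for k in range(n_segs):
        seg = x[k * step:k * step + nperseg]
        seg = seg - seg.mean()                # 'constant' detrend
        spec = np.fft.rfft(seg * win)
        psd = scale * np.abs(spec) ** 2
        psd[1:-1] *= 2                        # double all bins except DC/Nyquist
        psds.append(psd)
    freqs = np.fft.rfftfreq(nperseg, 1.0 / fs)
    return freqs, np.mean(psds, axis=0)       # average over segments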
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
    noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
    -----
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade-off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
    windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
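# The "See Also" entry above notes that `welch` is equivalent to `csd(x, x)`;
# indeed `welch` simply returns the real part of the auto-spectrum computed by
# this function.  A small, hedged numerical check of that statement -- the
# `_demo_*` name is hypothetical and added only for illustration:
def _demo_csd_is_welch():
    import numpy as np
    from scipy import signal
    rng = np.random.RandomState(0)
    x = rng.randn(4096)
    f_w, Pxx = signal.welch(x, fs=10e3, nperseg=512)
    f_c, Pxx_auto = signal.csd(x, x, fs=10e3, nperseg=512)
    assert np.allclose(f_w, f_c)
    assert np.allclose(Pxx, Pxx_auto.real)    # same PSD estimate
    return f_w, Pxx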
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
    and on your requirements. In contrast to Welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode=mode)
return freqs, time, Pxy
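# A short usage sketch for the function above, showing how the returned
# shapes relate to `nperseg` and the default `noverlap = nperseg // 8`
# (the last axis of Sxx indexes the segment times).  The `_demo_*` name is
# hypothetical and added only for illustration.
def _demo_spectrogram_shapes():
    import numpy as np
    from scipy import signal
    fs = 10e3
    t = np.arange(int(1e5)) / fs
    freq = np.linspace(1e3, 2e3, t.size)          # frequency sweep, as in the example
    x = np.sin(2 * np.pi * freq * t)
    f, seg_times, Sxx = signal.spectrogram(x, fs, nperseg=256)
    assert f.shape == (256 // 2 + 1,)             # one-sided frequency bins
    assert Sxx.shape == (f.size, seg_times.size)  # one PSD column per segment
    return f, seg_times, Sxx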
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
    noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
    An appropriate amount of overlap will depend on the choice of window
    and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade-off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
    windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
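# The definition quoted in the docstring above, Cxy = abs(Pxy)**2/(Pxx*Pyy),
# is exactly what this function evaluates from `welch` and `csd`.  A hedged
# numerical check of that relationship (the `_demo_*` name is hypothetical
# and added only for illustration):
def _demo_coherence_from_welch_and_csd():
    import numpy as np
    from scipy import signal
    rng = np.random.RandomState(1234)
    fs, nperseg = 10e3, 1024
    x = rng.randn(100000)
    b, a = signal.butter(2, 0.25, 'low')
    y = signal.lfilter(b, a, x) + 0.1 * rng.randn(100000)
    _, Pxx = signal.welch(x, fs, nperseg=nperseg)
    _, Pyy = signal.welch(y, fs, nperseg=nperseg)
    _, Pxy = signal.csd(x, y, fs, nperseg=nperseg)
    f, Cxy = signal.coherence(x, y, fs, nperseg=nperseg)
    assert np.allclose(Cxy, np.abs(Pxy) ** 2 / (Pxx * Pyy))
    return f, Cxy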
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving average
filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
warnings.warn('nperseg = {0:d}, is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
"""
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating data?",
http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
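# `_fft_helper` builds its overlapping segments with the zero-copy stride
# trick referenced above (`np.lib.stride_tricks.as_strided`).  A minimal
# NumPy-only sketch of just that windowing step, assuming a 1-D input; the
# `_demo_*` name is hypothetical and added only for illustration.
def _demo_sliding_segments(x, nperseg=8, noverlap=4):
    import numpy as np
    x = np.asarray(x)
    step = nperseg - noverlap
    shape = ((x.shape[-1] - noverlap) // step, nperseg)
    strides = (step * x.strides[-1], x.strides[-1])
    # Each row is a view into `x` shifted by `step` samples; no data is copied,
    # so the rows should be treated as read-only.
    return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)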
| mit |
anntzer/mplcursors | setup.py | 1 | 3367 | from setupext import find_packages, setup
# We cannot directly import matplotlib if `MPLCURSORS` is set because
# `sys.path` is not correctly set yet.
#
# The loading of `matplotlib.figure` does not go through the path entry finder
# because it is a submodule, so we must use a metapath finder instead.
@setup.register_pth_hook("mplcursors.pth")
def _pth_hook():
import os
if os.environ.get("MPLCURSORS"):
from importlib.machinery import PathFinder
import sys
class MplcursorsMetaPathFinder(PathFinder):
def find_spec(self, fullname, path=None, target=None):
spec = super().find_spec(fullname, path, target)
if fullname == "matplotlib.figure":
def exec_module(module):
type(spec.loader).exec_module(spec.loader, module)
# The pth file does not get properly uninstalled from
# a develop install. See pypa/pip#4176.
try:
import mplcursors
except ImportError:
return
import functools
import json
import weakref
# Ensure that when the cursor is removed(), or gets
                    # GC'd because the artists it references are GC'd, the
# entry also disappears.
cursors = weakref.WeakValueDictionary()
options = json.loads(os.environ["MPLCURSORS"])
@functools.wraps(module.Figure.draw)
def wrapper(self, *args, **kwargs):
rv = wrapper.__wrapped__(self, *args, **kwargs)
if self not in cursors:
cursor = mplcursors.cursor(self, **options)
if cursor.artists:
cursors[self] = cursor
else:
# No artist yet; skip possible
# initialization code.
cursor.remove()
return rv
module.Figure.draw = wrapper
spec.loader.exec_module = exec_module
sys.meta_path.remove(self)
return spec
sys.meta_path.insert(0, MplcursorsMetaPathFinder())
setup(
name="mplcursors",
description="Interactive data selection cursors for Matplotlib.",
long_description=open("README.rst", encoding="utf-8").read(),
author="Antony Lee",
url="https://github.com/anntzer/mplcursors",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Matplotlib",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
packages=find_packages("lib"),
package_dir={"": "lib"},
python_requires=">=3.6",
setup_requires=["setuptools_scm"],
use_scm_version=lambda: { # xref __init__.py
"version_scheme": "post-release",
"local_scheme": "node-and-date",
"write_to": "lib/mplcursors/_version.py",
},
install_requires=[
"matplotlib>=3.1",
],
)
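# The pth hook above works by installing a meta path finder that intercepts
# the import of ``matplotlib.figure`` and patches ``Figure.draw`` once the
# module has been executed.  Below is a stripped-down, hedged sketch of the
# same import-hook pattern that merely prints a message when a chosen module
# is imported; ``_DemoFinder`` and ``_install_demo_finder`` are hypothetical
# names used only for illustration and are not part of this package.
def _install_demo_finder(target="matplotlib.figure"):
    import sys
    from importlib.machinery import PathFinder

    class _DemoFinder(PathFinder):
        def find_spec(self, fullname, path=None, target_module=None):
            spec = super().find_spec(fullname, path, target_module)
            if spec is not None and fullname == target:
                original_exec = spec.loader.exec_module

                def exec_module(module):
                    original_exec(module)       # run the real module first
                    print("intercepted import of", fullname)

                spec.loader.exec_module = exec_module
                sys.meta_path.remove(self)      # one-shot hook
            return spec

    sys.meta_path.insert(0, _DemoFinder())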
| mit |
rajnikant1010/EVAutomation | focus_controller_ws/src/focus_control/src/simulator.py | 1 | 3583 | #!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import Float32
import std_msgs.msg as msg
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import math
from swiftnav_piksi.msg import loc
from swiftnav_piksi.msg import fil
from nav_msgs.msg import Odometry
from std_msgs.msg import Float32MultiArray as FloatArray
from std_msgs.msg import Float32
from pcan.msg import CANMsg
#Initialize Global Variables
speed = 15
app = 0
bpp = 0
torque = 0.5
data_received = False
#def callback(data):
def steerK():
global speed,torque
torque = torque*100.0
if speed <= 17.5 and speed > 12.5:
#rospy.loginfo("In 1");
return 128.663265306158*torque**2 - 14555.7517006847*torque + 411749.632653198
elif speed <= 22.5 and speed > 17.5:
#rospy.loginfo("In 2");
return 75.8444183620283*torque**2 - 8585.83320292176*torque + 243128.395670335
elif speed > 22.5:
#rospy.loginfo("In 3");
return 59.3656755346935*torque**2 - 6802.71726656282*torque + 195084.479916551
else:
#rospy.loginfo("In 4");
return 153.303819444404*torque**2 - 16821.7170138839*torque + 460472.934027625
def appK(app):
app = app*100
return 3.65*app - 9.7
def bppK(bpp):
bpp = bpp*100
return -0.001811*bpp**2 + 0.02862*bpp - 0.3768
def longCallback(data):
global app, bpp
app = data.data[0]
bpp = data.data[1]
def latCallback(data):
global torque
torque = data.data
def angCallback(data):
global data_received
data_received = True
def simulator():
#Initialize ROS Nodes
#pub = rospy.Publisher('filter_output', fil, queue_size=10)
pub = rospy.Publisher('sim_out', Float32 , queue_size=10)
pubcan = rospy.Publisher('can_data', CANMsg , queue_size=10)
rospy.init_node('simulator', anonymous=True)
#rospy.Subscriber("/gps/rtkfix", Odometry, callback)
rospy.Subscriber("longitudinal_commands", FloatArray, longCallback)
rospy.Subscriber("lateral_command", Float32, latCallback)
rospy.Subscriber("desired_angle", Float32, angCallback)
rate = 10
rate = rospy.Rate(rate) # 50hz
#desired_angle = 1000
#torque = .60*100
#app = 0.0
#bpp = 0.0
#desired_angle = 0
decel_rate = 0
    dt = 1.0/10  # seconds per loop at 10 Hz; float literal avoids Python 2 integer division (1/10 == 0)
left = False
count = 0
angle = 2000
global speed, app, bpp, torque
can_data = CANMsg()
while not rospy.is_shutdown():
#Wait until desired data is received
while data_received == False:
rospy.loginfo("waiting for data")
rate.sleep()
#steering wheel angle
if torque < 0.5:
torque = 1.0 - torque
left = True
angle_diff = steerK();
if left == True:
angle_diff = angle_diff*-1
left = False
desired_angle = angle_diff
#desired_angle = angle + angle_diff
angle = 0.9048*angle + 0.09516*desired_angle
#speed
if app != 0:
desired_speed = appK(app)
speed = 0.9971*speed + 0.002853*desired_speed
elif app == 0 and bpp == 0:
            decel_rate = -0.15  # m/s^2, negative so the coast-down update below reduces speed
speed_diff = decel_rate*dt
speed = speed + speed_diff*2.23694 #Convert to mph from m/s
if speed < 0:
speed = 0
else:
desired_speed = bppK(bpp)
decel_rate = 0.9355*decel_rate + 0.06449*desired_speed
speed_diff = decel_rate*dt
speed = speed + speed_diff*2.23694 #Convert to mph from m/s
if speed < 0:
speed = 0
#can_data.app = app
can_data.mph = speed
can_data.steering_angle = np.int16(angle)
#can_data.bpp = bpp
pub.publish(speed)
pubcan.publish(can_data)
rate.sleep()
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
try:
simulator()
except rospy.ROSInterruptException:
pass
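# The update rules used above (e.g. ``angle = 0.9048*angle + 0.09516*desired_angle``
# and ``speed = 0.9971*speed + 0.002853*desired_speed``) have the form of
# discretized first-order lag filters, y[k+1] = a*y[k] + (1 - a)*u[k] with
# a = exp(-dt/tau); for dt = 0.1 s, 0.9048 is approximately exp(-0.1/1.0),
# i.e. roughly a 1 s time constant.  This is an interpretation of the
# coefficients, not something stated in the original code.  A hedged,
# self-contained sketch of such a filter (hypothetical helper, unused above):
def _demo_first_order_lag(u, dt=0.1, tau=1.0, y0=0.0):
    import math
    a = math.exp(-dt / tau)        # discrete pole of the first-order lag
    y = y0
    out = []
    for u_k in u:                  # step the filter over the input sequence
        y = a * y + (1.0 - a) * u_k
        out.append(y)
    return out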
| bsd-2-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (from the metadata) of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function will choose between an integer id lookup and a metadata name
    lookup, by looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
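# The metadata files parsed above are plain text with one ``key: value`` pair
# per line.  A hedged, standalone sketch of that parsing step;
# ``_demo_parse_metadata`` is a hypothetical helper added only for
# illustration, and the example keys below are made up.
def _demo_parse_metadata(lines):
    metadata = dict()
    for line in lines:
        if ":" in line:
            key, value = line.split(":", 1)    # split on the first colon only
            metadata[key.strip()] = value.strip()
    return metadata
# _demo_parse_metadata(["name: some-dataset", "format: DocumentClassification"])
# -> {'name': 'some-dataset', 'format': 'DocumentClassification'}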
| mit |
pompiduskus/scikit-learn | sklearn/grid_search.py | 103 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
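# The iteration implemented above is a cartesian product over the (sorted)
# parameter values.  A minimal standalone sketch of the same expansion using
# ``itertools.product`` -- for illustration only; ``_demo_expand_grid`` is a
# hypothetical name and it ignores the list-of-dicts form handled above.
def _demo_expand_grid(param_grid):
    from itertools import product
    items = sorted(param_grid.items())         # sort keys for reproducibility
    if not items:
        return [{}]
    keys, values = zip(*items)
    return [dict(zip(keys, combo)) for combo in product(*values)]
# _demo_expand_grid({'a': [1, 2], 'b': [True, False]}) produces the same four
# parameter settings as ParameterGrid({'a': [1, 2], 'b': [True, False]}).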
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
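# The sampling rule implemented above is: values exposing an ``rvs`` method
# are treated as distributions and sampled from (using the global NumPy RNG,
# as the docstring notes), while plain lists are sampled uniformly.  A hedged,
# standalone sketch of those draws; ``_demo_sample_params`` is a hypothetical
# helper and it ignores the without-replacement branch handled above.
def _demo_sample_params(param_distributions, n_iter=4, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    out = []
    for _ in range(n_iter):
        params = dict()
        for name, v in sorted(param_distributions.items()):
            if hasattr(v, "rvs"):
                params[name] = v.rvs()                 # e.g. a scipy.stats distribution
            else:
                params[name] = v[rng.randint(len(v))]  # uniform choice from a list
        out.append(params)
    return out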
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
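# The ``__slots__ = ()`` trick documented above keeps a namedtuple subclass as
# compact as the raw namedtuple: without it, deriving a class (even just to
# add ``__repr__``) reintroduces a per-instance ``__dict__``.  A hedged,
# standalone sketch of the same pattern (hypothetical names, illustration
# only):
def _demo_slots_namedtuple():
    from collections import namedtuple
    _Point = namedtuple('_Point', ('x', 'y'))

    class PointWithRepr(_Point):
        __slots__ = ()                     # no per-instance __dict__

        def __repr__(self):
            return "Point(x=%r, y=%r)" % (self.x, self.y)

    p = PointWithRepr(1, 2)
    assert not hasattr(p, '__dict__')      # stays dict-less, like a plain tuple
    return p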
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
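# The aggregation loop in ``_fit`` combines the per-fold scores in one of two
# ways: weighted by the number of test samples when ``iid=True``, or as a
# plain mean over folds otherwise.  A hedged, standalone sketch of just that
# step (``_demo_aggregate_fold_scores`` is a hypothetical name, illustration
# only):
def _demo_aggregate_fold_scores(fold_scores, fold_test_sizes, iid=True):
    if iid:
        weighted = sum(s * n for s, n in zip(fold_scores, fold_test_sizes))
        return weighted / float(sum(fold_test_sizes))
    return sum(fold_scores) / float(len(fold_scores))
# _demo_aggregate_fold_scores([0.8, 0.9], [100, 50]) gives ~0.833, whereas the
# unweighted per-fold mean would be 0.85.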
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
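    Examples
    --------
    A minimal usage sketch; the estimator, data and parameter ranges below
    are illustrative assumptions rather than recommendations:
        >>> from scipy.stats import randint as sp_randint
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.ensemble import RandomForestClassifier
        >>> X, y = make_classification(random_state=0)
        >>> param_dist = {"max_depth": [3, None],
        ...               "min_samples_split": sp_randint(2, 11)}
        >>> search = RandomizedSearchCV(RandomForestClassifier(random_state=0),
        ...                             param_dist, n_iter=5, random_state=0)
        >>> search = search.fit(X, y)  # doctest: +SKIP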
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| bsd-3-clause |
sgenoud/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 2 | 2477 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print __doc__
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD
import numpy as np
import pylab as pl
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learn_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
pl.figure(figsize=(12, 6))
pl.subplot(1, 2, 1)
pl.title('Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
pl.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
pl.legend(loc='upper right')
pl.xlabel('Boosting Iterations')
pl.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
pl.subplot(1, 2, 2)
pl.barh(pos, feature_importance[sorted_idx], align='center')
pl.yticks(pos, boston.feature_names[sorted_idx])
pl.xlabel('Relative Importance')
pl.title('Variable Importance')
pl.show()
| bsd-3-clause |
vibhorag/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computationally heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
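   A minimal sketch (the target path below is an illustrative assumption)::
      >>> from sklearn.externals.joblib import dump, load
      >>> dump(a, '/tmp/joblib/a.pkl')            # doctest: +SKIP
      >>> a_restored = load('/tmp/joblib/a.pkl')  # doctest: +SKIP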
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/frame/test_convert_to.py | 6 | 7374 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(TestData):
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][int(k2) - 1]
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k][k2]
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [np.nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': np.nan, 'B': '3'}]
assert isinstance(recons_data, list)
assert len(recons_data) == 3
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
assert v2 == recons_data[k2][k]
def test_to_dict_timestamp(self):
# GH11247
# split/records producing np.datetime64 rather than Timestamps
# on datetime64[ns] dtypes only
tsmp = Timestamp('20130101')
test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
expected_records = [{'A': tsmp, 'B': tsmp},
{'A': tsmp, 'B': tsmp}]
expected_records_mixed = [{'A': tsmp, 'B': 1},
{'A': tsmp, 'B': 2}]
assert (test_data.to_dict(orient='records') ==
expected_records)
assert (test_data_mixed.to_dict(orient='records') ==
expected_records_mixed)
expected_series = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([tsmp, tsmp], name='B'),
}
expected_series_mixed = {
'A': Series([tsmp, tsmp], name='A'),
'B': Series([1, 2], name='B'),
}
tm.assert_dict_equal(test_data.to_dict(orient='series'),
expected_series)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='series'),
expected_series_mixed)
expected_split = {
'index': [0, 1],
'data': [[tsmp, tsmp],
[tsmp, tsmp]],
'columns': ['A', 'B']
}
expected_split_mixed = {
'index': [0, 1],
'data': [[tsmp, 1],
[tsmp, 2]],
'columns': ['A', 'B']
}
tm.assert_dict_equal(test_data.to_dict(orient='split'),
expected_split)
tm.assert_dict_equal(test_data_mixed.to_dict(orient='split'),
expected_split_mixed)
def test_to_dict_invalid_orient(self):
df = DataFrame({'A': [0, 1]})
pytest.raises(ValueError, df.to_dict, orient='xinvalid')
def test_to_records_dt64(self):
df = DataFrame([["one", "two", "three"],
["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"))
assert df.to_records()['index'][0] == df.index[0]
rs = df.to_records(convert_datetime64=False)
assert rs['index'][0] == df.index.values[0]
def test_to_records_with_multindex(self):
# GH3189
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)['level_0']
assert 'bar' in r
assert 'one' not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
import collections
collections.Mapping.register(email.message.Message)
headers = Parser().parsestr('From: <[email protected]>\n'
'To: <[email protected]>\n'
'Subject: Test message\n'
'\n'
'Body would go here\n')
frame = DataFrame.from_records([headers])
all(x in frame for x in ['Type', 'Subject', 'From'])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = 'X'
rs = df.to_records()
assert 'X' in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert 'index' in rs.dtype.fields
df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df.index.names = ['A', None]
rs = df.to_records()
assert 'level_0' in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH13172
# unicode_literals conflict with to_records
result = DataFrame([{u'a': u'x', u'b': 'y'}]).set_index(u'a')\
.to_records()
expected = np.rec.array([('x', 'y')], dtype=[('a', 'O'), ('b', 'O')])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue #11879. to_records used to raise an exception when used
        # with column names containing non-ASCII characters in Python 2
result = DataFrame(data={u"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
        # to be specified using a dictionary instead of a list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", u"accented_name_é"],
"formats": ['<i8', '<f8']}
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('tz', ['UTC', 'GMT', 'US/Eastern'])
def test_to_records_datetimeindex_with_tz(tz):
# GH13937
dr = date_range('2016-01-01', periods=10,
freq='S', tz=tz)
df = DataFrame({'datetime': dr}, index=dr)
expected = df.to_records()
result = df.tz_convert("UTC").to_records()
# both converted to UTC, so they are equal
tm.assert_numpy_array_equal(result, expected)
| mit |
bert9bert/statsmodels | statsmodels/examples/try_tukey_hsd.py | 33 | 6616 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 15:34:18 2012
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import StringIO
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.stats.libqsturng import qsturng
ss = '''\
43.9 1 1
39.0 1 2
46.7 1 3
43.8 1 4
44.2 1 5
47.7 1 6
43.6 1 7
38.9 1 8
43.6 1 9
40.0 1 10
89.8 2 1
87.1 2 2
92.7 2 3
90.6 2 4
87.7 2 5
92.4 2 6
86.1 2 7
88.1 2 8
90.8 2 9
89.1 2 10
68.4 3 1
69.3 3 2
68.5 3 3
66.4 3 4
70.0 3 5
68.1 3 6
70.6 3 7
65.2 3 8
63.8 3 9
69.2 3 10
36.2 4 1
45.2 4 2
40.7 4 3
40.5 4 4
39.3 4 5
40.3 4 6
43.2 4 7
38.7 4 8
40.9 4 9
39.7 4 10'''
#idx Treatment StressReduction
ss2 = '''\
1 mental 2
2 mental 2
3 mental 3
4 mental 4
5 mental 4
6 mental 5
7 mental 3
8 mental 4
9 mental 4
10 mental 4
11 physical 4
12 physical 4
13 physical 3
14 physical 5
15 physical 4
16 physical 1
17 physical 1
18 physical 2
19 physical 3
20 physical 3
21 medical 1
22 medical 2
23 medical 2
24 medical 2
25 medical 3
26 medical 2
27 medical 3
28 medical 1
29 medical 3
30 medical 1'''
ss3 = '''\
1 24.5
1 23.5
1 26.4
1 27.1
1 29.9
2 28.4
2 34.2
2 29.5
2 32.2
2 30.1
3 26.1
3 28.3
3 24.3
3 26.2
3 27.8'''
cylinders = np.array([8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 6, 6, 6, 4, 4,
4, 4, 4, 4, 6, 8, 8, 8, 8, 4, 4, 4, 4, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 6, 6,
6, 6, 4, 4, 4, 4, 4, 8, 4, 6, 6, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4])
cyl_labels = np.array(['USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'France',
'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Japan',
'Germany', 'France', 'Germany', 'Sweden', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'USA', 'USA', 'France', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'Japan', 'USA', 'USA', 'USA', 'USA', 'Germany', 'Japan', 'Japan', 'USA', 'Sweden', 'USA', 'France',
'Japan', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA',
'Germany', 'Japan', 'Japan', 'USA', 'USA', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'USA',
'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Germany', 'USA', 'USA', 'USA'])
dta = np.recfromtxt(StringIO(ss), names=("Rust","Brand","Replication"))
dta2 = np.recfromtxt(StringIO(ss2), names = ("idx", "Treatment", "StressReduction"))
dta3 = np.recfromtxt(StringIO(ss3), names = ("Brand", "Relief"))
from statsmodels.sandbox.stats.multicomp import tukeyhsd
import statsmodels.sandbox.stats.multicomp as multi
#print tukeyhsd(dta['Brand'], dta['Rust'])
def get_thsd(mci):
var_ = np.var(mci.groupstats.groupdemean(), ddof=len(mci.groupsunique))
means = mci.groupstats.groupmean
nobs = mci.groupstats.groupnobs
resi = tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=qsturng(0.95, len(means), (nobs-1).sum()))
print(resi[4])
var2 = (mci.groupstats.groupvarwithin() * (nobs - 1)).sum() \
/ (nobs - 1).sum()
assert_almost_equal(var_, var2, decimal=14)
return resi
mc = multi.MultiComparison(dta['Rust'], dta['Brand'])
res = mc.tukeyhsd()
print(res)
mc2 = multi.MultiComparison(dta2['StressReduction'], dta2['Treatment'])
res2 = mc2.tukeyhsd()
print(res2)
mc2s = multi.MultiComparison(dta2['StressReduction'][3:29], dta2['Treatment'][3:29])
res2s = mc2s.tukeyhsd()
print(res2s)
res2s_001 = mc2s.tukeyhsd(alpha=0.01)
#R result
tukeyhsd2s = np.array([1.888889,0.8888889,-1,0.2658549,-0.5908785,-2.587133,3.511923,2.368656,0.5871331,0.002837638,0.150456,0.1266072]).reshape(3,4, order='F')
assert_almost_equal(res2s_001.confint, tukeyhsd2s[:,1:3], decimal=3)
mc3 = multi.MultiComparison(dta3['Relief'], dta3['Brand'])
res3 = mc3.tukeyhsd()
print(res3)
tukeyhsd4 = multi.MultiComparison(cylinders, cyl_labels, group_order=["Sweden", "Japan", "Germany", "France", "USA"])
res4 = tukeyhsd4.tukeyhsd()
print(res4)
try:
import matplotlib.pyplot as plt
fig = res4.plot_simultaneous("USA")
plt.show()
except Exception as e:
print(e)
for mci in [mc, mc2, mc3]:
get_thsd(mci)
from scipy import stats
print(mc2.allpairtest(stats.ttest_ind, method='b')[0])
'''same as SAS:
>>> np.var(mci.groupstats.groupdemean(), ddof=3)
4.6773333333333351
>>> var_ = np.var(mci.groupstats.groupdemean(), ddof=3)
>>> tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=qsturng(0.95, 3, 12))[4]
array([[ 0.95263648, 8.24736352],
[-3.38736352, 3.90736352],
[-7.98736352, -0.69263648]])
>>> tukeyhsd(means, nobs, var_, df=None, alpha=0.05, q_crit=3.77278)[4]
array([[ 0.95098508, 8.24901492],
[-3.38901492, 3.90901492],
[-7.98901492, -0.69098508]])
'''
ss5 = '''\
Comparisons significant at the 0.05 level are indicated by ***.
BRAND
Comparison Difference
Between
Means Simultaneous 95% Confidence Limits Sign.
2 - 3 4.340 0.691 7.989 ***
2 - 1 4.600 0.951 8.249 ***
3 - 2 -4.340 -7.989 -0.691 ***
3 - 1 0.260 -3.389 3.909 -
1 - 2 -4.600 -8.249 -0.951 ***
1 - 3 -0.260 -3.909 3.389 '''
ss5 = '''\
2 - 3 4.340 0.691 7.989 ***
2 - 1 4.600 0.951 8.249 ***
3 - 2 -4.340 -7.989 -0.691 ***
3 - 1 0.260 -3.389 3.909 -
1 - 2 -4.600 -8.249 -0.951 ***
1 - 3 -0.260 -3.909 3.389 '''
dta5 = np.recfromtxt(StringIO(ss5), names = ('pair', 'mean', 'lower', 'upper', 'sig'), delimiter='\t')
sas_ = dta5[[1,3,2]]
confint1 = res3.confint
confint2 = sas_[['lower','upper']].view(float).reshape((3,2))
assert_almost_equal(confint1, confint2, decimal=2)
reject1 = res3.reject
reject2 = sas_['sig'] == '***'
assert_equal(reject1, reject2)
meandiff1 = res3.meandiffs
meandiff2 = sas_['mean']
assert_almost_equal(meandiff1, meandiff2, decimal=14)
| bsd-3-clause |
bmazin/SDR | Projects/Filters/matched_fir.py | 1 | 2980 | import numpy
from scipy import *
from scipy import optimize
import matplotlib.pyplot as mpl
#import pylab
import random, math
import sim_utilities
############################################################################################
# some constants
###########################################################################################
#size = 50
size = 26
#size = 500
dt = 1.
time = numpy.array([i*dt for i in range(size)])
sigma = 10.
############################################################################################
# Define noise correlation function
############################################################################################
L = 2050 # just some big number to smooth C.
C_avg = numpy.array([0.]*size)
nf_avg = numpy.array([0.]*size)
for m in range(L):
noise = [random.gauss(0, sigma) for i in range(size)]
C = numpy.correlate(noise, noise, 'full')/size
C_avg = C_avg + C[0:size]
v = list(C_avg/L)
b = []
for i in range(size):
b.append(v[size-i-1:size] + [0]*(size-i-1))
M_inv = numpy.array(b)
M = numpy.linalg.inv(M_inv)
############################################################################################
# Define pulse template.
############################################################################################
p = []
for t in time:
# ARCONS used 25 us time constant.
#p.append(math.exp(-t/(25*dt)))
p.append(math.exp(-t/(40*dt)))
p = numpy.array(p)
p = p/p.max()
############################################################################################
# Define matched filter, g.
############################################################################################
den = numpy.dot(p, numpy.dot(M,p))
g = (numpy.dot(M,p)+numpy.dot(p,M))/(2*den)
print '[',
for G in g:
print G, ',',
print ']'
print len(g)
#print 'g reversed: '
#print '[',
#for G in g.tolist.reverse():
# print G, ',',
#print ']'
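############################################################################################
# Quick sanity check (illustrative addition, not part of the original analysis):
# by construction g is normalized so that filtering the noise-free template p
# returns an amplitude estimate of exactly 1 for a unit-amplitude pulse.
############################################################################################
print 'filter response to the unit-amplitude template:', numpy.dot(g, p)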
############################################################################################
# Create a bunch of pulses and examine the statistics.
# Note, the convolution of the filter coefficients and the signal are done with
# "numpy.dot(g, signal)." Also, the following assumes the arrival time is known.
############################################################################################
Amplitude = 50.
A, A_raw = [], []
for i in range(1000):
noise = numpy.array([random.gauss(0, sigma) for i in range(size)])
signal = noise + Amplitude*p
A.append(numpy.dot(g, signal))
A_raw.append(signal[0])
#np, bins_raw, patches_peaks = mpl.hist(A_raw, 50, normed=1, facecolor='blue', alpha=0.75)
n, bins_filtered, patches = mpl.hist(A, 50, normed=1, facecolor='green', alpha=0.75)
fig = mpl.figure()
ax1 = fig.add_subplot(211)
ax1.plot(bins_filtered)
#ax1.plot(bins_raw, bins_filtered)
ax2 = fig.add_subplot(212)
ax2.plot(p, '.')
ax3 = ax2.twinx()
ax3.plot(g,'r.')
#ax2.plot(signal.tolist()[250:500]+signal.tolist()[0:250], '.', g, '.', p, '.')
mpl.show()
| gpl-2.0 |
uwafsl/MissionPlanner | Lib/site-packages/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate  # used by lsim2 for input interpolation
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
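    Examples
    --------
    A minimal sketch; the transfer-function coefficients are chosen only for
    illustration:
        >>> num = [1.0, 3.0]
        >>> den = [1.0, 2.0, 1.0]
        >>> A, B, C, D = tf2ss(num, den)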
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
# pad numerator to have same number of columns has denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
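        A minimal sketch (the coefficients are illustrative only):
            >>> sys = lti([1.0], [1.0, 3.0, 2.0])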
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
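    Examples
    --------
    A minimal sketch (a first-order low-pass driven by a unit step; all
    values are illustrative only):
        >>> import numpy as np
        >>> system = ([1.0], [1.0, 1.0])  # H(s) = 1 / (s + 1)
        >>> t = np.linspace(0, 5, 101)
        >>> u = np.ones_like(t)
        >>> tout, yout, xout = lsim(system, u, t)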
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
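    Examples
    --------
    A minimal sketch (the coefficients are illustrative only and give two
    distinct real poles):
        >>> system = ([1.0], [1.0, 3.0, 2.0])
        >>> t, y = step(system)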
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
| gpl-3.0 |
ndingwall/scikit-learn | examples/compose/plot_column_transformer_mixed_types.py | 8 | 7505 | """
===================================
Column Transformer with Mixed Types
===================================
.. currentmodule:: sklearn
This example illustrates how to apply different preprocessing and feature
extraction pipelines to different subsets of features, using
:class:`~compose.ColumnTransformer`. This is particularly handy for the
case of datasets that contain heterogeneous data types, since we may want to
scale the numeric features and one-hot encode the categorical ones.
In this example, the numeric data is standard-scaled after median-imputation,
while the categorical data is one-hot encoded, with categories unseen during
training ignored at transform time.
In addition, we show two different ways to dispatch the columns to the
particular pre-processor: by column names and by column data types.
Finally, the preprocessing pipeline is integrated in a full prediction pipeline
using :class:`~pipeline.Pipeline`, together with a simple classification
model.
"""
# Author: Pedro Morales <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
np.random.seed(0)
# Load data from https://www.openml.org/d/40945
X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True)
# Alternatively X and y can be obtained directly from the frame attribute:
# X = titanic.frame.drop('survived', axis=1)
# y = titanic.frame['survived']
# %%
# Use ``ColumnTransformer`` by selecting column by names
###############################################################################
# We will train our classifier with the following features:
#
# Numeric Features:
#
# * ``age``: float;
# * ``fare``: float.
#
# Categorical Features:
#
# * ``embarked``: categories encoded as strings ``{'C', 'S', 'Q'}``;
# * ``sex``: categories encoded as strings ``{'female', 'male'}``;
# * ``pclass``: ordinal integers ``{1, 2, 3}``.
#
# We create the preprocessing pipelines for both numeric and categorical data.
# Note that ``pclass`` could either be treated as a categorical or numeric
# feature.
numeric_features = ['age', 'fare']
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['embarked', 'sex', 'pclass']
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))
# %%
# HTML representation of ``Pipeline``
###############################################################################
# When the ``Pipeline`` is printed out in a jupyter notebook an HTML
# representation of the estimator is displayed as follows:
from sklearn import set_config
set_config(display='diagram')
clf
# %%
# Use ``ColumnTransformer`` by selecting column by data types
###############################################################################
# When dealing with a cleaned dataset, the preprocessing can be automatic by
# using the data types of the column to decide whether to treat a column as a
# numerical or categorical feature.
# :func:`sklearn.compose.make_column_selector` gives this possibility.
# First, let's only select a subset of columns to simplify our
# example.
subset_feature = ['embarked', 'sex', 'pclass', 'age', 'fare']
X_train, X_test = X_train[subset_feature], X_test[subset_feature]
# %%
# Then, we introspect the information regarding each column data type.
X_train.info()
# %%
# We can observe that the `embarked` and `sex` columns were tagged as
# `category` columns when loading the data with ``fetch_openml``. Therefore, we
# can use this information to dispatch the categorical columns to the
# ``categorical_transformer`` and the remaining columns to the
# ``numerical_transformer``.
# %%
# .. note:: In practice, you will have to handle yourself the column data type.
# If you want some columns to be considered as `category`, you will have to
# convert them into categorical columns. If you are using pandas, you can
# refer to their documentation regarding `Categorical data
# <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_.
from sklearn.compose import make_column_selector as selector
preprocessor = ColumnTransformer(transformers=[
('num', numeric_transformer, selector(dtype_exclude="category")),
('cat', categorical_transformer, selector(dtype_include="category"))
])
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))
# %%
# The resulting score is not exactly the same as the one from the previous
# pipeline because the dtype-based selector treats the ``pclass`` column as
# a numeric feature instead of a categorical feature as previously:
selector(dtype_exclude="category")(X_train)
# %%
selector(dtype_include="category")(X_train)
# %%
# Using the prediction pipeline in a grid search
##############################################################################
# Grid search can also be performed on the different preprocessing steps
# defined in the ``ColumnTransformer`` object, together with the classifier's
# hyperparameters as part of the ``Pipeline``.
# We will search for both the imputer strategy of the numeric preprocessing
# and the regularization parameter of the logistic regression using
# :class:`~sklearn.model_selection.GridSearchCV`.
param_grid = {
'preprocessor__num__imputer__strategy': ['mean', 'median'],
'classifier__C': [0.1, 1.0, 10, 100],
}
grid_search = GridSearchCV(clf, param_grid, cv=10)
grid_search
# %%
# Calling 'fit' triggers the cross-validated search for the best
# hyper-parameters combination:
#
grid_search.fit(X_train, y_train)
print("Best params:")
print(grid_search.best_params_)
# %%
# The internal cross-validation scores obtained by those parameters is:
print(f"Internal CV score: {grid_search.best_score_:.3f}")
# %%
# We can also introspect the top grid search results as a pandas dataframe:
import pandas as pd
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results = cv_results.sort_values("mean_test_score", ascending=False)
cv_results[["mean_test_score", "std_test_score",
"param_preprocessor__num__imputer__strategy",
"param_classifier__C"
]].head(5)
# %%
# The best hyper-parameters have been used to re-fit a final model on the full
# training set. We can evaluate that final model on held-out test data that was
# not used for hyperparameter tuning.
#
print(("best logistic regression from grid search: %.3f"
% grid_search.score(X_test, y_test)))
| bsd-3-clause |
anurag313/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
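    A minimal sketch (the array is illustrative only):
        >>> import numpy as np
        >>> X = np.array([[3., 4.], [0., 12.]])
        >>> norms = row_norms(X)  # -> array([ 5., 13.])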
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
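    Examples
    --------
    Minimal illustrative sketch with a dense vector (sparse inputs with a
    `toarray` attribute are measured via `nnz` instead):
    >>> import numpy as np
    >>> density(np.array([0., 1., 0., 2.]))
    0.5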
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
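    Examples
    --------
    Minimal illustrative sketch mixing a sparse and a dense operand:
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> a = csr_matrix(np.eye(3))
    >>> b = np.arange(9.).reshape(3, 3)
    >>> out = safe_sparse_dot(a, b, dense_output=True)  # equals b here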
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
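    Examples
    --------
    Minimal illustrative sketch (entries depend on the random state, so
    only the shape is shown):
    >>> import numpy as np
    >>> A = np.random.RandomState(0).normal(size=(30, 10))
    >>> Q = randomized_range_finder(A, size=5, n_iter=2, random_state=0)
    >>> Q.shape
    (30, 5)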
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
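    Examples
    --------
    Minimal illustrative sketch (the factors depend on the random state,
    so only the shapes are shown):
    >>> import numpy as np
    >>> M = np.random.RandomState(42).normal(size=(40, 20))
    >>> U, s, V = randomized_svd(M, n_components=5, random_state=42)
    >>> U.shape, s.shape, V.shape
    ((40, 5), (5,), (5, 20))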
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
    # Use the max to normalize, as with the log this is what accumulates
    # the fewest errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
        Singular values smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
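    Examples
    --------
    Minimal illustrative sketch showing that large-magnitude inputs stay
    finite (exact values are not shown, as they depend on floating point):
    >>> import numpy as np
    >>> x = np.array([-800., 0., 800.])
    >>> out = log_logistic(x)  # roughly [-800., -0.693, 0.]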
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
        Argument to the softmax function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
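    Examples
    --------
    Minimal illustrative sketch: each row sums to one even for inputs that
    would overflow a naive implementation:
    >>> import numpy as np
    >>> X = np.array([[1000., 1001.], [3., 3.]])
    >>> probs = softmax(X)  # rows ~ [0.269, 0.731] and [0.5, 0.5]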
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3,
        pp. 242-247
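    Examples
    --------
    Minimal illustrative sketch folding a second batch into the running
    statistics of a first batch:
    >>> import numpy as np
    >>> X = np.random.RandomState(0).rand(20, 3)
    >>> mean, var, n = _batch_mean_variance_update(
    ...     X[10:], X[:10].mean(axis=0), X[:10].var(axis=0), 10)
    >>> n
    20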
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
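    Examples
    --------
    Minimal illustrative sketch: after flipping, the largest-magnitude
    entry of each row is non-negative:
    >>> import numpy as np
    >>> u = np.array([[-3., 1.], [2., -5.]])
    >>> u = _deterministic_vector_sign_flip(u)  # rows: [3., -1.], [-2., 5.]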
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
amanzi/ats-dev | tools/python_models/compare.py | 2 | 4371 | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits import axes_grid1
import sys,os
sys.path.append(os.path.join(os.environ['HOME'],'research','python'))
import colors
import permafrost_model_explicit
import permafrost_model
import wc_T
# model explicit
pm = permafrost_model_explicit.PermafrostModel()
wc = wc_T.WC_T(pm)
# model implicit
pm2 = permafrost_model.PermafrostModel()
wc2 = wc_T.WC_T(pm2)
# water content for each case
p0s = np.array([97000,95000,90000])
WC0s = np.array([wc.wc(273.2,p0) for p0 in p0s])
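# temperature sweep: coarse steps far from freezing, refined steps near
# 273.15 K where the saturation curves change fastest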
def get_reasonable_Ts():
Ts1 = np.arange(273.15-10, 273.15-5, 1.)
Ts2 = np.arange(273.15-5, 273.15-1, .1)
Ts3 = np.arange(273.15-1, 273.15-.01, .001)
Ts3a = np.arange(273.15-.01, 273.15+.01, .0001)
Ts3b = np.arange(273.15+.01, 273.15+1, .001)
Ts4 = np.arange(273.15+1, 273.15+5, .1)
Ts5 = np.arange(273.15+5, 273.15+10, 1.)
Ts = np.concatenate((Ts1, Ts2, Ts3, Ts3a, Ts3b, Ts4, Ts5))
return Ts
Ts = get_reasonable_Ts()
print len(Ts)
fig = plt.figure(figsize=(10,8))
axs = []
ax1 = []
ax1.append(fig.add_subplot(221))
div1 = axes_grid1.make_axes_locatable(ax1[0])
ax1.append(div1.append_axes("right",size=3.,pad=0,sharey=ax1[0]))
axs.append(ax1)
ax2 = []
ax2.append(fig.add_subplot(222))
div2 = axes_grid1.make_axes_locatable(ax2[0])
ax2.append(div2.append_axes("right",size=3.,pad=0,sharey=ax2[0]))
axs.append(ax2)
ax3 = []
ax3.append(fig.add_subplot(223))
div3 = axes_grid1.make_axes_locatable(ax3[0])
ax3.append(div3.append_axes("right",size=3.,pad=0,sharey=ax3[0]))
axs.append(ax3)
ax4 = []
ax4.append(fig.add_subplot(224))
div4 = axes_grid1.make_axes_locatable(ax4[0])
ax4.append(div4.append_axes("right",size=3.,pad=0,sharey=ax4[0]))
axs.append(ax4)
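# plot helper: each quantity gets a full-range panel plus a zoomed companion
# axis near the freezing point; the extra lines mark T = 273.15 K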
def plot(c, Ts, pTs, s, pTs2, s2):
axs[0][0].plot(Ts, pTs[:,0], '-', color=c)
axs[0][0].plot(Ts, pTs2[:,0], '-', color=c)
axs[0][1].plot(Ts, pTs[:,0], '-', color=c)
axs[0][1].plot(Ts, pTs2[:,0], '-', color=c)
axs[0][0].plot([273.15,273.15],[-3.e7, .5e7],'k')
axs[0][1].plot([273.15,273.15],[-3.e7, .5e7],'k')
axs[1][0].plot(Ts, s[:,0], '-', color=c)
axs[1][0].plot(Ts, s2[:,0], '-', color=c)
axs[1][1].plot(Ts, s[:,0], '-', color=c)
axs[1][1].plot(Ts, s2[:,0], '-', color=c)
axs[1][0].plot([273.15,273.15],[0,1],'k')
axs[1][1].plot([273.15,273.15],[0,1],'k')
axs[2][0].plot(Ts, s[:,1], '-', color=c)
axs[2][0].plot(Ts, s2[:,1], '-', color=c)
axs[2][1].plot(Ts, s[:,1], '-', color=c)
axs[2][1].plot(Ts, s2[:,1], '-', color=c)
axs[2][0].plot([273.15,273.15],[0,1],'k')
axs[2][1].plot([273.15,273.15],[0,1],'k')
axs[3][0].plot(Ts, s[:,2], '-', color=c)
axs[3][0].plot(Ts, s2[:,2], '-', color=c)
axs[3][1].plot(Ts, s[:,2], '-', color=c)
axs[3][1].plot(Ts, s2[:,2], '-', color=c)
axs[3][0].plot([273.15,273.15],[0,1],'k')
axs[3][1].plot([273.15,273.15],[0,1],'k')
pTs = np.array([[101325.,T] for T in Ts])
s = np.array([pm.saturations_Tp(T,p) for p,T in pTs])
s2 = np.array([pm2.saturations_Tp(T,p) for p,T in pTs])
plot('r', Ts, pTs, s, pTs, s2)
colors = ['goldenrod','g','b']
for i,WC0 in enumerate(WC0s):
pTs = np.array([[wc.pressure(T,WC0),T] for T in Ts])
s = np.array([pm.saturations_Tp(T,p) for p,T in pTs])
pTs2 = np.array([[wc2.pressure(T,WC0),T] for T in Ts])
s2 = np.array([pm2.saturations_Tp(T,p) for p,T in pTs2])
plot(colors[i],Ts,pTs,s,pTs2,s2)
axs[0][0].set_ylabel("pressure")
axs[0][0].set_xlabel("temperature")
axs[0][0].set_xticks([265.,270, 275, 280])
axs[0][1].set_xlim(273.14,273.16)
axs[0][1].set_xticks([273.14,273.16])
axs[1][0].set_ylabel("gas saturation")
axs[1][0].set_xlabel("temperature")
axs[1][0].set_ylim(-.01,1.01)
axs[1][0].set_xticks([265.,270, 275, 280])
axs[1][1].set_ylim(-.01,1.01)
axs[1][1].set_xlim(273.14,273.16)
axs[1][1].set_xticks([273.14,273.16])
axs[2][0].set_ylabel("liquid saturation")
axs[2][0].set_xlabel("temperature")
axs[2][0].set_ylim(-.01,1.01)
axs[2][0].set_xticks([265.,270, 275, 280])
axs[2][1].set_ylim(-.01,1.01)
axs[2][1].set_xlim(273.14,273.16)
axs[2][1].set_xticks([273.14,273.16])
axs[3][0].set_ylabel("ice saturation")
axs[3][0].set_xlabel("temperature")
axs[3][0].set_ylim(-.01,1.01)
axs[3][0].set_xticks([265.,270, 275, 280])
axs[3][1].set_ylim(-.01,1.01)
axs[3][1].set_xlim(273.14,273.16)
axs[3][1].set_xticks([273.14,273.16])
plt.show()
| bsd-3-clause |
krisaju95/NewsArticleClustering | IncrementalClustering/module6_Classifier.py | 1 | 2735 | import pickle
import numpy as np
import pandas as pd
import math
import os
newsPaperName = "NewsPaper A"
path = "C:/Users/hp/Desktop/FINAL YEAR PROJECT/S8/"
words = set()
dataFrame2 = pickle.load( open(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Feature Set','dataFrame2.p'), "rb" ))
dataFrame3 = pickle.load( open(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Feature Set','dataFrame3.p'), "rb" ))
wordSetSize = len(dataFrame2.columns)
numberOfDocuments = len(dataFrame2.index)
oldDataFrame3 = pickle.load(open(os.path.join(path, 'Feature Set', 'dataFrame3.p'), "rb" ))
numberOfDocumentsOldDataFrame3 = len(oldDataFrame3.index)
originalClusters = pickle.load(open(os.path.join(path, 'KMeansClustering','dataFrame5.p'), "rb"))
dataFrame4 = pickle.load(open(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Cosine Similarity', 'dataFrame4.p'), "rb"))
dataFrame5 = pd.DataFrame(np.zeros(numberOfDocuments).reshape(numberOfDocuments,1))
numberOfClusters = 5
minSimilarityThreshold = 0.007
# Find the cluster whose documents are, on average, most similar to the given document
def findMostSimilarCluster(documentID):
clusterID = 0
#doc = document.as_matrix()
similarityValues = np.zeros(numberOfClusters)
clusterSizes = np.zeros(numberOfClusters)
for i in range(numberOfDocumentsOldDataFrame3):
clusterID = int(originalClusters.ix[i , "ClusterID"])
similarityValue = dataFrame4.ix[documentID , i]
similarityValues[clusterID] = similarityValues[clusterID] + similarityValue
clusterSizes[clusterID] = clusterSizes[clusterID] + 1
similarityValues = np.divide(similarityValues , clusterSizes)
clusterID = np.argmax(similarityValues)
if np.max(similarityValues) < minSimilarityThreshold:
clusterID = -1
return clusterID
# Classify each new document into its most similar cluster (or -1 if none is similar enough)
def classifyDocuments():
print "Classifying Documents"
for row in range(numberOfDocuments):
#document = dataFrame4.loc[row , :]
mostSimilarCluster = findMostSimilarCluster(row)
dataFrame5.ix[row , "clusterID"] = int(mostSimilarCluster)
print "Documents classified"
print "Saving data in DataFrame5 as a pickle package and as a CSV"
del dataFrame5[0]
dataFrame5.to_pickle(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Cosine Similarity','dataFrame5.p'))
dataFrame5.to_csv(os.path.join(path , 'Crawled Articles' , newsPaperName , 'Cosine Similarity', 'dataFrame5.csv'))
print "DataFrame5 has been saved"
classifyDocuments()
#print len(dataFrame3.columns) | gpl-3.0 |
jmargeta/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 4 | 13867 | import numpy as np
from numpy import linalg
from scipy.sparse import csr_matrix
from scipy.spatial.distance import cosine, cityblock, minkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.preprocessing import normalize
def test_pairwise_distances():
""" Test the pairwise_distance helper function. """
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# manhattan does not support sparse matrices atm.
assert_raises(ValueError, pairwise_distances, csr_matrix(X),
metric="manhattan")
# Test cosine as a string metric versus cosine callable
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Tests that precomputed metric returns pointer to, and not copy of, X.
S = np.dot(X, X.T)
S2 = pairwise_distances(S, metric="precomputed")
assert_true(S is S2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
def test_pairwise_parallel():
rng = np.random.RandomState(0)
for func in (np.array, csr_matrix):
X = func(rng.random_sample((5, 4)))
Y = func(rng.random_sample((3, 4)))
S = euclidean_distances(X)
S2 = _parallel_pairwise(X, None, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
S = euclidean_distances(X, Y)
S2 = _parallel_pairwise(X, Y, euclidean_distances, n_jobs=-1)
assert_array_almost_equal(S, S2)
def test_pairwise_kernels():
""" Test the pairwise_kernels helper function. """
def callable_rbf_kernel(x, y, **kwds):
""" Callable version of pairwise.rbf_kernel. """
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_euclidean_distances():
""" Check the pairwise Euclidean distances computation"""
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
""" Valid kernels should be symmetric"""
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity():
""" Test the cosine_similarity. """
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
        # has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
""" Ensure that pairwise array check works for dense matrices."""
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
""" Ensure that if XA and XB are given correctly, they return as equal."""
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
""" Ensure an error is raised if the dimensions are different. """
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_invalid_dimensions():
""" Ensure an error is raised on 1D input arrays. """
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
""" Ensures that checks return valid sparse matrices. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
assert_equal(XA_sparse, XA_checked)
assert_equal(XB_sparse, XB_checked)
def tuplify(X):
""" Turns a numpy matrix (any n-dimensional array) into tuples."""
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
""" Ensures that checks return valid tuples. """
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
| bsd-3-clause |
kissf-lu/jupyter_app | ipython/py36_simpy_sim/tool/items.py | 1 | 5486 | # -*- coding: utf-8 -*-
"""
Author: Ted
Date: 2017-07-13
Description:
    Package class
    Cargo/Truck class
    Uld class
"""
import simpy
import pandas as pd
__all__ = ["Package", "Truck", "Uld", "SmallBag", "SmallPackage", "Pipeline"]
class Package:
"""包裹"""
def __init__(self,
env: simpy.Environment,
attr: pd.Series,
item_id : str,
path: tuple, ):
        # all of the package's information is stored in attr
self.attr = attr
# id
self.item_id = item_id
# env
self.env = env
# for record
self.plan_path = path
# for popping
self.path = list(path)
# for time
self.time_records = []
# next pipeline_id
self.next_pipeline = ()
        # record of this package's processing at a machine
self.package_record = dict(package_id=item_id)
def add_machine_id(self, machine_id):
self.package_record["machine_id"] = machine_id
def start_wait(self):
self.package_record["start_wait"] = self.env.now
def start_serve(self):
self.package_record["start_serve"] = self.env.now
def end_serve(self):
self.package_record["end_serve"] = self.env.now
def pop_mark(self):
"""返回下一个pipeline id: (now_loc, next_loc), 删去第一个节点,记录当前的时间点"""
if len(self.path) >= 2:
now_loc, next_loc = self.path[0: 2]
        # when the package reaches reload (final sorting), the final-sorting queue id has only one value
elif len(self.path) == 1:
now_loc, next_loc = self.path[-1], None
else:
raise ValueError('The path have been empty!')
# remove the now_loc
pop_loc = self.path.pop(0)
self.time_records.append((pop_loc, self.env.now))
        # update the next pipeline id
self.next_pipeline = now_loc, next_loc
def __str__(self):
display_dct = dict(self.attr)
return f"<package attr:{dict(display_dct)}, path: {self.plan_path}>"
class SmallPackage(Package):
"""小件包裹"""
def __str__(self):
display_dct = dict(self.attr)
return f"<SmallBag attr:{dict(display_dct)}, path: {self.plan_path}>"
class SmallBag(Package):
"""小件包"""
# todo
def __init__(self, env: simpy.Environment,
attr: pd.Series,
item_id : str,
path: tuple,
small_packages: pd.DataFrame):
super(SmallBag, self).__init__(env, attr, item_id, path)
self.store = small_packages
self.store_size = len(self.store)
def __str__(self):
display_dct = dict(self.attr)
return f"<SmallBag attr:{dict(display_dct)}, path: {self.plan_path}, store_size:{store_size}>"
class Truck:
"""货车"""
def __init__(self, env: simpy.Environment, item_id: str, come_time: int, truck_type: str, packages:pd.DataFrame):
"""
        :param item_id: truck identifier
        :param come_time: arrival time of the truck
        :param truck_type: type of the truck
        :param packages: a dataframe containing all packages
"""
self.item_id = item_id
self.come_time = come_time
self.store = packages
self.store_size = len(self.store)
self.truck_type = truck_type
self.env = env
def __str__(self):
return f"<truck_id: {self.item_id}, come_time: {self.come_time}, store_size:{self.store_size}>"
class Uld(Truck):
"""航空箱"""
pass
class Pipeline:
"""传送带"""
def __init__(self,
env: simpy.Environment,
delay_time: float=0,
pipeline_id: tuple=None,
queue_id: str=None,
machine_type: str=None,
):
self.env = env
self.delay = delay_time
self.queue = simpy.Store(env)
self.pipeline_id = pipeline_id
self.queue_id = queue_id
self.machine_type = machine_type
        # count of packages currently on the conveyor
self.latency_counts = 0
self.latency_counts_time = []
        # count of packages queued in the machine waiting area
self.machine_waiting_counts_time = []
        # start the counter process
self.env.process(self.get_counts())
def get_counts(self):
"""计数器"""
while True:
latency_dict = dict(pipeline_id=self.pipeline_id,
timestamp=self.env.now,
counts=self.latency_counts)
wait_dict = dict(pipeline_id=self.pipeline_id,
timestamp=self.env.now,
counts=len(self.queue.items))
self.latency_counts_time.append(latency_dict)
self.machine_waiting_counts_time.append(wait_dict)
yield self.env.timeout(1)
def latency(self, item: Package):
"""模拟传送时间"""
self.latency_counts += 1
yield self.env.timeout(self.delay)
        # add the data point
item.pop_mark()
item.add_machine_id(machine_id=self.pipeline_id)
self.queue.put(item)
self.latency_counts -= 1
def put(self, item: Package):
        item.start_wait()
self.env.process(self.latency(item))
def get(self):
return self.queue.get()
def __str__(self):
return f"<Pipeline: {self.pipeline_id}, delay: {self.delay}, package_counts: {self.latency_counts}>"
if __name__ == '__main__':
pass
| mit |
mattskone/garage_alarm | samples.py | 2 | 2381 | import argparse
import logging
import os
import numpy as np
from sklearn.utils import shuffle
import camera
import config
import features
logger = logging.getLogger(__name__)
def get_samples(pos_samples_dir, neg_samples_dir, reduced=False):
"""Produce sample data ready for training a classifier.
:param pos_samples_dir: path to directory containing positive samples
:param neg_samples_dir: path to directory containing negative samples
:param reduced: when True, apply dimensionality reduction to the samples
:returns: two numpy arrays. The first (x, f) contains the feature data for
x samples, and the second (y, ) contains the classifications for each
of the samples.
"""
logger.info('Getting training samples')
pos_samples = features.get_features_for_dir(pos_samples_dir)
pos_classes = np.ones(pos_samples.shape[0])
neg_samples = features.get_features_for_dir(neg_samples_dir)
neg_classes = np.zeros(neg_samples.shape[0])
samples = np.vstack([pos_samples, neg_samples])
if reduced:
samples = features.reduce_features(samples, False)
classes = np.hstack([pos_classes, neg_classes])
samples, classes = shuffle(samples, classes)
logger.info('Got training samples')
return samples, classes
def take_sample(pos_sample):
""""Take a new sample for use in training.
:param pos_sample: when True, store the captured image as a positive sample
"""
if pos_sample:
sample_dir = os.path.join(config.INSTALL_DIR,
config.POSITIVE_SAMPLE_DIR)
else:
sample_dir = os.path.join(config.INSTALL_DIR,
config.NEGATIVE_SAMPLE_DIR)
c = camera.Camera(sample_dir)
c.take_photo()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
mutex_group = parser.add_mutually_exclusive_group(required=True)
mutex_group.add_argument('--positive',
dest='pos_sample',
action='store_true',
help='Set for positive samples')
mutex_group.add_argument('--negative',
dest='pos_sample',
action='store_false',
help='Set for negative samples')
    args = parser.parse_args()
take_sample(args.pos_sample)
| mit |
phobson/paramnormal | docs/conf.py | 1 | 9686 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# paramnormal documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 2 17:36:54 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx
import seaborn
clean_bkgd = {'axes.facecolor': 'none', 'figure.facecolor': 'none'}
seaborn.set(style='ticks', rc=clean_bkgd)
source_suffix = ['.rst']
numpydoc_show_class_members = False
autodoc_member_order = 'bysource'
html_theme = 'sphinx_rtd_theme'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'plot_generator',
'plot_directive',
'numpydoc',
'ipython_directive',
'ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'paramnormal'
copyright = '2015 - 2016, Paul Hobson'
author = 'Paul Hobson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = '0.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'paramnormaldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'paramnormal.tex', 'paramnormal Documentation',
'Paul Hobson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'paramnormal', 'paramnormal Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'paramnormal', 'paramnormal Documentation',
author, 'paramnormal', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| mit |
decvalts/cartopy | lib/cartopy/tests/mpl/test_pseudo_color.py | 4 | 3007 | # (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import io
import matplotlib.pyplot as plt
try:
from unittest import mock
except ImportError:
import mock
import numpy as np
import cartopy.crs as ccrs
def test_pcolormesh_fully_masked():
data = np.ma.masked_all((30, 40))
# Check that a fully masked data array doesn't trigger a pcolor call.
with mock.patch('cartopy.mpl.geoaxes.GeoAxes.pcolor') as pcolor:
ax = plt.axes(projection=ccrs.PlateCarree())
ax.pcolormesh(np.linspace(-90, 90, 40), np.linspace(0, 360, 30), data)
assert pcolor.call_count == 0, ("pcolor shouldn't have been called, "
"but was.")
plt.close()
def test_pcolormesh_partially_masked():
data = np.ma.masked_all((30, 40))
data[0:100] = 10
# Check that a partially masked data array does trigger a pcolor call.
with mock.patch('cartopy.mpl.geoaxes.GeoAxes.pcolor') as pcolor:
ax = plt.axes(projection=ccrs.PlateCarree())
ax.pcolormesh(np.linspace(-90, 90, 40), np.linspace(0, 360, 30), data)
assert pcolor.call_count == 1, ("pcolor should have been called "
"exactly once.")
plt.close()
def test_pcolormesh_invisible():
data = np.zeros((3, 3))
# Check that a fully invisible mesh doesn't fail.
with mock.patch('cartopy.mpl.geoaxes.GeoAxes.pcolor') as pcolor:
ax = plt.axes(projection=ccrs.Orthographic())
ax.pcolormesh(np.linspace(-75, 75, 3), np.linspace(105, 255, 3), data,
transform=ccrs.PlateCarree())
assert pcolor.call_count == 0, ("pcolor shouldn't have been called, "
"but was.")
plt.close()
def test_savefig_tight():
nx, ny = 36, 18
xbnds = np.linspace(0, 360, nx, endpoint=True)
ybnds = np.linspace(-90, 90, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
plt.subplot(211, projection=ccrs.Robinson())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
buf = io.BytesIO()
plt.savefig(buf, format='png', bbox_inches='tight')
plt.close()
| gpl-3.0 |
NMGRL/pychron | pychron/ml/editors/cluster.py | 1 | 2924 | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from numpy import array, column_stack
from sklearn.cluster import KMeans, AffinityPropagation, MeanShift, estimate_bandwidth, DBSCAN
from sklearn.preprocessing import StandardScaler
from traits.api import List, Instance
from traitsui.api import View, UItem
from uncertainties import nominal_value
from pychron.envisage.tasks.base_editor import BaseTraitsEditor
from pychron.graph.graph import Graph
class ClusterEditor(BaseTraitsEditor):
items = List
graph = Instance(Graph)
def set_items(self, items):
self.items = items
self.graph = g = Graph()
p = g.new_plot()
p.value_range.tight_bounds = False
p.index_range.tight_bounds = False
xattr = 'age'
yattr = 'kca'
g.set_x_title('Age')
g.set_y_title('K/Ca')
cluster_kind = self.plotter_options.cluster_kind
xs = self._extract_attr(items, xattr)
ys = self._extract_attr(items, yattr)
xx = column_stack((xs, ys))
xx = StandardScaler().fit_transform(xx)
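        # pick the scikit-learn clusterer based on the plot options; the
        # keyword values below are the defaults this editor assumes, not
        # tuned parameters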
if cluster_kind == 'kmeans':
clusterer = KMeans
kw = {'n_clusters': 2}
elif cluster_kind == 'meanshift':
clusterer = MeanShift
ebw = estimate_bandwidth(xx)
kw = {'bandwidth': ebw}
elif cluster_kind == 'dbscan':
clusterer = DBSCAN
kw = {}
else:
clusterer = AffinityPropagation
kw = {'preference': -50}
cs = clusterer(**kw).fit_predict(xx)
g.new_series(xs, ys, colors=cs, type='cmap_scatter')
def _extract_attr(self, items, attr):
return array([nominal_value(ai.get_value(attr)) for ai in items])
def traits_view(self):
v = View(UItem('graph', style='custom'))
return v
if __name__ == '__main__':
import random
class I:
def __init__(self, scale):
self._scale = scale
def get_value(self, item):
return random.random() + self._scale
c = ClusterEditor()
c.set_items([I(1) for i in range(100)] + [I(10) for i in range(100)])
c.configure_traits()
# ============= EOF =============================================
| apache-2.0 |
refgenomics/onecodex | tests/test_utils.py | 2 | 3475 | from functools import partial
import mock
import pytest
from click import BadParameter
from onecodex.api import Api
from onecodex.utils import (
snake_case,
check_for_allowed_file,
valid_api_key,
has_missing_values,
init_sentry,
)
def test_check_allowed_file():
# bad ones
with pytest.raises(SystemExit):
check_for_allowed_file("file.bam")
check_for_allowed_file("file")
# good ones
check_for_allowed_file("file.fastq")
check_for_allowed_file("file.fastq.gz")
def test_is_valid_api_key():
empty_key = ""
short_key = "123"
long_key = "123abc123abc123abc123abc123abc123abc123abc123abc123abc"
good_key = "123abc123abc123abc123abc123abc32"
# its a click callback so it expects some other params
valid_api_key_partial = partial(valid_api_key, None, None)
for key in [empty_key, short_key, long_key]:
with pytest.raises(BadParameter):
valid_api_key_partial(key)
assert good_key == valid_api_key_partial(good_key)
@pytest.mark.parametrize(
"resource,uris",
[
("Samples", []),
("Samples", ["761bc54b97f64980"]),
("Analyses", []),
("Analyses", ["45a573fb7833449a"]),
("Markerpanels", []),
],
)
def test_fetcher(ocx, api_data, resource, uris):
if len(uris) == 0:
pass
else:
for uri in uris:
resource_class = getattr(ocx, resource)
instance = resource_class.get(uri)
assert instance is not None
def test_snake_case():
test_cases = ["SnakeCase", "snakeCase", "SNAKE_CASE"]
for test_case in test_cases:
assert snake_case(test_case) == "snake_case"
def test_custom_ca_bundle(runner, api_data):
"""Tests that we're properly merging settings into our prepared requests."""
with mock.patch("requests.Session.merge_environment_settings") as merge_env:
ocx = Api(base_url="http://localhost:3000", cache_schema=True)
classifications = ocx.Classifications.all()
assert merge_env.call_count >= 1
assert len(classifications) >= 1
def test_has_missing_values():
pytest.importorskip("numpy")
pytest.importorskip("pandas")
import numpy as np
import pandas as pd
assert has_missing_values(pd.Series([1, np.nan, 2]))
assert has_missing_values(pd.Series([np.nan, np.nan]))
assert not has_missing_values(pd.Series([1, 2, 3]))
assert has_missing_values(pd.DataFrame({"col1": [1, 2, 3], "col2": ["a", "b", None]}))
assert not has_missing_values(pd.DataFrame({"col1": [1, 2, 3], "col2": ["a", "b", "c"]}))
@pytest.mark.parametrize(
"ONE_CODEX_NO_TELEMETRY,ONE_CODEX_SENTRY_DSN,call_count,dsn_contains",
[
("1", None, 0, ""),
(None, None, 1, "sentry.io"),
(None, "SomeDSN", 1, "SomeDSN"),
],
)
def test_init_sentry(
monkeypatch, ONE_CODEX_NO_TELEMETRY, ONE_CODEX_SENTRY_DSN, call_count, dsn_contains
):
if ONE_CODEX_NO_TELEMETRY:
monkeypatch.setenv("ONE_CODEX_NO_TELEMETRY", ONE_CODEX_NO_TELEMETRY)
if ONE_CODEX_SENTRY_DSN:
monkeypatch.setenv("ONE_CODEX_SENTRY_DSN", ONE_CODEX_SENTRY_DSN)
with mock.patch("onecodex.utils._setup_sentry_for_ipython") as _, mock.patch(
"sentry_sdk.init"
) as mocked_sentry_init:
init_sentry()
assert mocked_sentry_init.call_count == call_count
if call_count:
assert dsn_contains in mocked_sentry_init.call_args.kwargs["dsn"]
| mit |
harisbal/pandas | pandas/tests/extension/test_interval.py | 1 | 3434 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overriding a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import IntervalDtype
from pandas import Interval
from pandas.core.arrays import IntervalArray
from pandas.tests.extension import base
import pandas.util.testing as tm
def make_data():
N = 100
left = np.random.uniform(size=N).cumsum()
right = left + np.random.uniform(size=N)
return [Interval(l, r) for l, r in zip(left, right)]
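# Hedged illustration (not one of the suite's tests): what make_data feeds into
# IntervalArray, and how missing values show up. The doctest-style output below
# is indicative only.
#
#   >>> arr = IntervalArray(make_data())
#   >>> len(arr)
#   100
#   >>> IntervalArray.from_tuples([(0, 1), None, (1, 2)]).isna()
#   array([False,  True, False])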
@pytest.fixture
def dtype():
return IntervalDtype()
@pytest.fixture
def data():
"""Length-100 PeriodArray for semantics test."""
return IntervalArray(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return IntervalArray.from_tuples([None, (0, 1)])
@pytest.fixture
def data_for_sorting():
return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])
@pytest.fixture
def data_missing_for_sorting():
return IntervalArray.from_tuples([(1, 2), None, (0, 1)])
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
a = (0, 1)
b = (1, 2)
c = (2, 3)
return IntervalArray.from_tuples([b, b, None, None, a, a, b, c])
class BaseInterval(object):
pass
class TestDtype(BaseInterval, base.BaseDtypeTests):
pass
class TestCasting(BaseInterval, base.BaseCastingTests):
pass
class TestConstructors(BaseInterval, base.BaseConstructorsTests):
pass
class TestGetitem(BaseInterval, base.BaseGetitemTests):
pass
class TestGrouping(BaseInterval, base.BaseGroupbyTests):
pass
class TestInterface(BaseInterval, base.BaseInterfaceTests):
pass
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(BaseInterval, base.BaseMethodsTests):
@pytest.mark.skip(reason='addition is not defined for intervals')
def test_combine_add(self, data_repeated):
pass
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
pass
class TestMissing(BaseInterval, base.BaseMissingTests):
# Index.fillna only accepts scalar `value`, so we have to skip all
# non-scalar fill tests.
unsupported_fill = pytest.mark.skip("Unsupported fillna option.")
@unsupported_fill
def test_fillna_limit_pad(self):
pass
@unsupported_fill
def test_fillna_series_method(self):
pass
@unsupported_fill
def test_fillna_limit_backfill(self):
pass
@unsupported_fill
def test_fillna_series(self):
pass
def test_non_scalar_raises(self, data_missing):
msg = "Got a 'list' instead."
with tm.assert_raises_regex(TypeError, msg):
data_missing.fillna([1, 1])
class TestReshaping(BaseInterval, base.BaseReshapingTests):
pass
class TestSetitem(BaseInterval, base.BaseSetitemTests):
pass
| bsd-3-clause |
qifeigit/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
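# Rough end-to-end illustration of the three strip_* helpers on a toy post
# (commented out so it is not executed at import time; the post text is
# invented for the example).
#
#   post = ("From: [email protected]\nSubject: test\n\n"
#           "someone writes:\n> quoted line\nactual body\n\n--\nsig line")
#   body = strip_newsgroup_header(post)    # drops everything before the blank line
#   body = strip_newsgroup_quoting(body)   # drops '>' lines and the 'writes:' intro
#   body = strip_newsgroup_footer(body)    # drops the trailing signature block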
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
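# Typical usage sketch (kept as a comment; the two category names are standard
# 20 newsgroups labels, chosen here only for illustration):
#
#   news_train = fetch_20newsgroups(subset='train',
#                                   categories=['alt.atheism', 'sci.space'],
#                                   remove=('headers', 'footers', 'quotes'))
#   print(len(news_train.data), news_train.target_names)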
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
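# Usage sketch (commented out; the exact shapes depend on the downloaded data,
# so no concrete numbers are asserted here):
#
#   bunch = fetch_20newsgroups_vectorized(subset='train')
#   print(bunch.data.shape, bunch.target.shape, len(bunch.target_names))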
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/utils/estimator_checks.py | 16 | 64623 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
    # Checks that the Estimator rejects non-finite (inf) target values.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
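# Minimal usage sketch for the public entry point (commented out to keep this
# module silent at import time); any scikit-learn compatible estimator class
# can be passed:
#
#   from sklearn.linear_model import LogisticRegression
#   check_estimator(LogisticRegression)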
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
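# Hedged example of the __array__ protocol NotAnArray relies on (not a test):
#
#   >>> np.asarray(NotAnArray(np.zeros((2, 2)))).shape
#   (2, 2)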
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
    # This estimator raises:
    # ValueError: Found array with 0 feature(s) (shape=(23, 0))
    # while a minimum of 1 is required.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
def check_dont_overwrite_parameters(name, Estimator):
# check that fit method only changes or sets private attributes
if hasattr(Estimator.__init__, "deprecated_original"):
# to not check deprecated classes
return
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only one sample (fit may raise ValueError)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only one feature (fit may raise ValueError)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
# Checks that Estimator X's do not contain NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3 and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # small to reach convergence.
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
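# Worked example of the 'balanced' reweighting checked above (an illustrative
# note only, not executed): with y = [1, 1, 1, -1, -1] we have n_samples = 5
# and n_classes = 2, so class 1 gets weight 5 / (3 * 2) ~= 0.83 and class -1
# gets weight 5 / (2 * 2) = 1.25, i.e. the rarer class is weighted up.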
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
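# For instance (an illustrative check, not run here): for a name containing
# "MultiTask", y = np.array([1, 2, 3]) is returned reshaped to shape (3, 1),
# while any other estimator name leaves y unchanged.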
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
# Test that estimators that are not transformers with a parameter
# max_iter, return the attribute of n_iter_ at least 1.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
# Test that transformers with a parameter max_iter, return the
# attribute of n_iter_ at least 1.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_decision_proba_consistency(name, Estimator):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = Estimator()
set_testing_parameters(estimator)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
| bsd-3-clause |
poryfly/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
maniteja123/scipy | scipy/stats/_stats_mstats_common.py | 10 | 8427 | from collections import namedtuple
import numpy as np
from . import distributions
__all__ = ['_find_repeats', 'linregress', 'theilslopes']
LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
'rvalue', 'pvalue',
'stderr'))
def linregress(x, y=None):
"""
Calculate a linear least-squares regression for two sets of measurements.
Parameters
----------
x, y : array_like
Two sets of measurements. Both arrays should have the same length.
If only x is given (and y=None), then it must be a two-dimensional
array where one dimension has length 2. The two sets of measurements
are then found by splitting the array along the length-2 dimension.
Returns
-------
slope : float
slope of the regression line
intercept : float
intercept of the regression line
rvalue : float
correlation coefficient
pvalue : float
two-sided p-value for a hypothesis test whose null hypothesis is
that the slope is zero.
stderr : float
Standard error of the estimated gradient.
See also
--------
:func:`scipy.optimize.curve_fit` : Use non-linear
least squares to fit a function to data.
:func:`scipy.optimize.leastsq` : Minimize the sum of
squares of a set of equations.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = np.random.random(10)
>>> y = np.random.random(10)
>>> slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    To get the coefficient of determination (r_squared):
>>> print("r-squared:", r_value**2)
('r-squared:', 0.080402268539028335)
Plot the data along with the fitted line
>>> plt.plot(x, y, 'o', label='original data')
>>> plt.plot(x, intercept + slope*x, 'r', label='fitted line')
>>> plt.legend()
>>> plt.show()
"""
TINY = 1.0e-20
if y is None: # x is a (2, N) or (N, 2) shaped array_like
x = np.asarray(x)
if x.shape[0] == 2:
x, y = x
elif x.shape[1] == 2:
x, y = x.T
else:
msg = ("If only `x` is given as input, it has to be of shape "
"(2, N) or (N, 2), provided shape was %s" % str(x.shape))
raise ValueError(msg)
else:
x = np.asarray(x)
y = np.asarray(y)
if x.size == 0 or y.size == 0:
raise ValueError("Inputs must not be empty.")
n = len(x)
xmean = np.mean(x, None)
ymean = np.mean(y, None)
# average sum of squares:
ssxm, ssxym, ssyxm, ssym = np.cov(x, y, bias=1).flat
r_num = ssxym
r_den = np.sqrt(ssxm * ssym)
if r_den == 0.0:
r = 0.0
else:
r = r_num / r_den
# test for numerical error propagation
if r > 1.0:
r = 1.0
elif r < -1.0:
r = -1.0
df = n - 2
t = r * np.sqrt(df / ((1.0 - r + TINY)*(1.0 + r + TINY)))
prob = 2 * distributions.t.sf(np.abs(t), df)
slope = r_num / ssxm
intercept = ymean - slope*xmean
sterrest = np.sqrt((1 - r**2) * ssym / ssxm / df)
return LinregressResult(slope, intercept, r, prob, sterrest)
def theilslopes(y, x=None, alpha=0.95):
r"""
Computes the Theil-Sen estimator for a set of points (x, y).
`theilslopes` implements a method for robust linear regression. It
computes the slope as the median of all slopes between paired values.
Parameters
----------
y : array_like
Dependent variable.
x : array_like or None, optional
Independent variable. If None, use ``arange(len(y))`` instead.
alpha : float, optional
Confidence degree between 0 and 1. Default is 95% confidence.
Note that `alpha` is symmetric around 0.5, i.e. both 0.1 and 0.9 are
interpreted as "find the 90% confidence interval".
Returns
-------
medslope : float
Theil slope.
medintercept : float
Intercept of the Theil line, as ``median(y) - medslope*median(x)``.
lo_slope : float
Lower bound of the confidence interval on `medslope`.
up_slope : float
Upper bound of the confidence interval on `medslope`.
Notes
-----
The implementation of `theilslopes` follows [1]_. The intercept is
not defined in [1]_, and here it is defined as ``median(y) -
medslope*median(x)``, which is given in [3]_. Other definitions of
the intercept exist in the literature. A confidence interval for
the intercept is not given as this question is not addressed in
[1]_.
References
----------
.. [1] P.K. Sen, "Estimates of the regression coefficient based on Kendall's tau",
J. Am. Stat. Assoc., Vol. 63, pp. 1379-1389, 1968.
.. [2] H. Theil, "A rank-invariant method of linear and polynomial
regression analysis I, II and III", Nederl. Akad. Wetensch., Proc.
53:, pp. 386-392, pp. 521-525, pp. 1397-1412, 1950.
.. [3] W.L. Conover, "Practical nonparametric statistics", 2nd ed.,
John Wiley and Sons, New York, pp. 493.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, num=150)
>>> y = x + np.random.normal(size=x.size)
>>> y[11:15] += 10 # add outliers
>>> y[-5:] -= 7
Compute the slope, intercept and 90% confidence interval. For comparison,
also compute the least-squares fit with `linregress`:
>>> res = stats.theilslopes(y, x, 0.90)
>>> lsq_res = stats.linregress(x, y)
Plot the results. The Theil-Sen regression line is shown in red, with the
dashed red lines illustrating the confidence interval of the slope (note
that the dashed red lines are not the confidence interval of the regression
as the confidence interval of the intercept is not included). The green
line shows the least-squares fit for comparison.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, y, 'b.')
>>> ax.plot(x, res[1] + res[0] * x, 'r-')
>>> ax.plot(x, res[1] + res[2] * x, 'r--')
>>> ax.plot(x, res[1] + res[3] * x, 'r--')
>>> ax.plot(x, lsq_res[1] + lsq_res[0] * x, 'g-')
>>> plt.show()
"""
# We copy both x and y so we can use _find_repeats.
y = np.array(y).flatten()
if x is None:
x = np.arange(len(y), dtype=float)
else:
x = np.array(x, dtype=float).flatten()
if len(x) != len(y):
raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y), len(x)))
# Compute sorted slopes only when deltax > 0
deltax = x[:, np.newaxis] - x
deltay = y[:, np.newaxis] - y
slopes = deltay[deltax > 0] / deltax[deltax > 0]
slopes.sort()
medslope = np.median(slopes)
medinter = np.median(y) - medslope * np.median(x)
# Now compute confidence intervals
if alpha > 0.5:
alpha = 1. - alpha
z = distributions.norm.ppf(alpha / 2.)
# This implements (2.6) from Sen (1968)
_, nxreps = _find_repeats(x)
_, nyreps = _find_repeats(y)
nt = len(slopes) # N in Sen (1968)
ny = len(y) # n in Sen (1968)
# Equation 2.6 in Sen (1968):
sigsq = 1/18. * (ny * (ny-1) * (2*ny+5) -
np.sum(k * (k-1) * (2*k + 5) for k in nxreps) -
np.sum(k * (k-1) * (2*k + 5) for k in nyreps))
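    # Spelled out (a reading of the expression above, cf. Sen (1968), eq. 2.6):
    #   sigma**2 = (1/18) * [n*(n-1)*(2*n+5)
    #                        - sum_j t_j*(t_j-1)*(2*t_j+5)
    #                        - sum_k u_k*(u_k-1)*(2*u_k+5)]
    # where t_j and u_k are the sizes of the tied groups in x and y.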
# Find the confidence interval indices in `slopes`
sigma = np.sqrt(sigsq)
Ru = min(int(np.round((nt - z*sigma)/2.)), len(slopes)-1)
Rl = max(int(np.round((nt + z*sigma)/2.)) - 1, 0)
delta = slopes[[Rl, Ru]]
return medslope, medinter, delta[0], delta[1]
def _find_repeats(arr):
# This function assumes it may clobber its input.
if len(arr) == 0:
return np.array(0, np.float64), np.array(0, np.intp)
# XXX This cast was previously needed for the Fortran implementation,
# should we ditch it?
arr = np.asarray(arr, np.float64).ravel()
arr.sort()
# Taken from NumPy 1.9's np.unique.
change = np.concatenate(([True], arr[1:] != arr[:-1]))
unique = arr[change]
change_idx = np.concatenate(np.nonzero(change) + ([arr.size],))
freq = np.diff(change_idx)
atleast2 = freq > 1
return unique[atleast2], freq[atleast2]
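# Illustrative behaviour of this private helper (a hand-checked example, not a
# doctest run by SciPy): given np.array([1., 2., 2., 3., 3., 3.]) it returns
# the repeated values (2., 3.) together with their counts (2, 3); values that
# appear only once are dropped.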
| bsd-3-clause |
bakfu/bakfu | bakfu/classify/base.py | 2 | 1167 | """Base classes for classifiers"""
from ..core.classes import Processor
class BaseClassifier(Processor):
'''
The base class for classifiers.
'''
def __init__(self, *args, **kwargs):
super(BaseClassifier, self).__init__(*args, **kwargs)
self.classifier = None
class SklearnClassifier(BaseClassifier):
'''
A class wrapping sklearn classifiers.
'''
#The sklearn classifier
classifier_class = None
def __init__(self, *args, **kwargs):
        super(SklearnClassifier, self).__init__(*args, **kwargs)
self.init_classifier(*args, **kwargs)
def init_classifier(self, *args, **kwargs):
'''
Init sklearn classifier.
'''
self.classifier = self.classifier_class(*args, **kwargs)
def run_classifier(self, caller, *args, **kwargs):
pass
def run(self, caller, *args, **kwargs):
return self.run_classifier(caller, *args, **kwargs)
def __getattr__(self, attr):
        '''Propagate attribute search to the wrapped classifier.'''
        if attr == 'classifier':
            # Avoid infinite recursion if the wrapped classifier is not set.
            raise AttributeError(attr)
        return getattr(self.classifier, attr) | bsd-3-clause |
inoue0124/TensorFlow_Keras | chapter4/dropout_keras.py | 1 | 1544 | import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.layers.core import Dropout
from sklearn import datasets
from sklearn.model_selection import train_test_split
np.random.seed(0)
'''
Data generation
'''
mnist = datasets.fetch_mldata('MNIST original', data_home='.')
n = len(mnist.data)
N = 10000 # use only a subset of MNIST
indices = np.random.permutation(range(n))[:N] # randomly select N samples
X = mnist.data[indices]
y = mnist.target[indices]
Y = np.eye(10)[y.astype(int)] # convert to 1-of-K (one-hot) representation
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8)
'''
Model definition
'''
n_in = len(X[0]) # 784
n_hidden = 200
n_out = len(Y[0]) # 10
model = Sequential()
model.add(Dense(n_hidden, input_dim=n_in))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(n_hidden))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(n_hidden))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(n_hidden))
model.add(Activation('tanh'))
model.add(Dropout(0.5))
model.add(Dense(n_out))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=0.01),
metrics=['accuracy'])
'''
Model training
'''
epochs = 100
batch_size = 200
model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size)
'''
Evaluation of prediction accuracy
'''
loss_and_metrics = model.evaluate(X_test, Y_test)
print(loss_and_metrics)
| mit |
probcomp/bdbcontrib | tests/test_population_method.py | 1 | 11661 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas
import pytest
import re
from bdbcontrib import Population
from bdbcontrib import population_method as pm
# pylint: disable=no-member
def test_fill_documentation():
my_fillers = {'__foo__': 'bar', '__baz__': None}
docstr = '''Blah blah.
Nevermind.
Args:
foo: __foo__
blux: a stoats.Mink
baz: __baz__
fifi: optional.
Returns loveliness.
'''
result = pm.fill_documentation(docstr, my_fillers)
import re
assert re.search(r'^\s+foo: bar$', result, re.M), result
assert not re.search(r'baz', result), result
assert re.search(r'Mink\n\s+fifi', result, re.M), result
def test_compile_argspec_transforms():
def curie(fname, when, where, field1='Physics', field2='Chemistry'):
pass
xforms = pm.compile_argspec_transforms(curie, {})
assert ['fname', 'when', 'where'] == xforms['required_names']
assert 3 == len(xforms['required_transformers'])
for i, xform in enumerate(xforms['required_transformers']):
assert i+1 == xform(None, [1, 2, 3])
assert 2 == len(xforms['optional_transformers'])
xforms = pm.compile_argspec_transforms(
curie, {'population': 0, 'generator_name': ['field1', 'field2']})
assert ['when', 'where'] == xforms['required_names']
dummy_self = lambda: None # Can set attrs on this, unlike an object().
assert 3 == len(xforms['required_transformers'])
assert dummy_self == xforms['required_transformers'][0](dummy_self, [])
assert 'boo' == xforms['required_transformers'][1](dummy_self, ['boo'])
assert 'bee' == xforms['required_transformers'][2](dummy_self, [1, 'bee'])
assert 2 == len(xforms['optional_transformers'])
assert 'good' == xforms['optional_transformers']['field1'](
dummy_self, {'field1': 'good'})
dummy_self.generator_name = 'great'
assert 'great' == xforms['optional_transformers']['field2'](
dummy_self, {'field1': 'bad'})
with pytest.raises(IndexError):
xforms = pm.compile_argspec_transforms(
curie, {'population': -1, 'generator_name': ['field1', 'field2']})
with pytest.raises(IndexError):
xforms = pm.compile_argspec_transforms(
curie, {'population': 1, 'population_to_bdb': 1})
with pytest.raises(IndexError):
xforms = pm.compile_argspec_transforms(
curie, {'population': 'field1', 'population_to_bdb': ['field1']})
with pytest.raises(IndexError):
xforms = pm.compile_argspec_transforms(
curie, {'population': 'no_such_option'})
with pytest.raises(IndexError):
xforms = pm.compile_argspec_transforms(
curie, {'population': 1, 'population_to_bdb': 1})
def test_apply_argspec_transforms():
def curie(fname, when, where, field1='Physics', field2='Chemistry'):
pass
xforms = pm.compile_argspec_transforms(
curie, {'population': 0, 'generator_name': ['field1', 'field2']})
assert ['when', 'where'] == xforms['required_names']
log = lambda: None
log.generator_name = 'genny'
(dargs, dkwargs) = pm.apply_argspec_transforms(
log, xforms, ('one', 'two'), {'field1': 'zip!'})
assert [log, 'one', 'two', 'zip!', 'genny'] == dargs
assert {} == dkwargs # Only using dkwargs for varkw at this point.
(dargs, dkwargs) = pm.apply_argspec_transforms(
log, xforms, ('when', 'where', 'f_one', 'f_two'), {})
assert [log, 'when', 'where', 'f_one', 'f_two'] == dargs
assert {} == dkwargs
@pm.population_method(specifier_to_df=[1], population_to_bdb=0,
generator_name='gen')
def example_population_method(bdb, df1, df2, gen=None):
'''A silly population method.
bdb: __population_to_bdb__
df1: __specifier_to_df__
Is the first one.
df2: __specifier_to_df__
Is the second one.
gen: It's okay not to use the doc substitution.
'''
return str(len(df1)) + str(len(df2)) + gen
@pm.population_method()
def minimal_population_method():
# It's fine to not even have a docstring, and not to use any of the
# transformations
return 42
def test_method_calls():
pop = Population('foo', df=pandas.DataFrame({'a': [11, 22]}),
session_capture_name='test_population.py')
assert 42 == pop.minimal_population_method()
# It's ok to name or not name your positional args.
# (But it's a syntax error to have a positional arg after a named one.)
    assert '12foo_cc' == pop.example_population_method(
        df1='select * limit 1', df2=pandas.DataFrame({'b': [23, 34]}))
    assert '12foo_cc' == pop.example_population_method(
        'select * limit 1', df2=pandas.DataFrame({'b': [23, 34]}))
    assert '12foo_cc' == pop.example_population_method(
        'select * limit 1', pandas.DataFrame({'b': [23, 34]}))
    # It's okay to name them and get them in the wrong order too.
    assert '12foo_cc' == pop.example_population_method(
        df2=pandas.DataFrame({'b': [23, 34]}), df1='select * limit 1')
# The bdb argument should be present and explained in the function, and
# should be absent in the method, where it's implicit.
as_method = pop.example_population_method.__doc__
as_function = example_population_method.__doc__
assert not re.search(r'bdb', as_method), as_method
assert re.search('bdb', as_function), as_function
with pytest.raises(TypeError) as exc:
pop.minimal_population_method(3)
assert ('test_population_method.minimal_population_method()'
' takes no arguments (1 given)' == str(exc.value)), repr(exc)
    epm = "test_population_method.example_population_method()"
with pytest.raises(TypeError) as exc:
pop.example_population_method([5])
assert (epm + " takes at least 2 arguments (1 given)"
== str(exc.value)), repr(exc)
with pytest.raises(TypeError) as exc:
pop.example_population_method([5], gen=True)
# This message is among Python's uglier warts, but I want to be consistent.
assert (epm + " takes at least 2 arguments (2 given)"
== str(exc.value)), repr(exc)
with pytest.raises(TypeError) as exc:
pop.example_population_method([1], [2], [3], [4])
assert (epm + " takes at most 3 arguments (4 given)"
== str(exc.value)), repr(exc)
with pytest.raises(TypeError) as exc:
pop.example_population_method("SELECT * FROM %t", "SELECT * FROM %g",
b="bang")
assert (epm + " got an unexpected keyword argument 'b'"
== str(exc.value)), repr(exc)
with pytest.raises(TypeError) as exc:
pop.example_population_method(
"SELECT * FROM %t", df1="SELECT * FROM %g")
# This is another misleading message, because the only way for it to happen
# is with a required positional argument that happens to be named at the
# call site. A caller might, in this case, expect keywords to be interpreted
# first, for example, and positional arguments consumed in order thereafter.
# Not to say that that would be a better choice. I would simply prefer "for
# 2nd argument df1" rather than "for keyword argument df1".
# Again, I find it more important to be consistent than to be right.
assert (epm + " got multiple values for keyword argument 'df1'"
== str(exc.value)), repr(exc)
# Outright repetition, like "gen=1, gen=2", is a syntax error.
@pm.population_method(population=0)
def five_defaults(pop, a=1, b=2, c=3, d=4, e=5):
return [a, b, c, d, e]
def test_fn_defaults():
pop = Population('foo', df=pandas.DataFrame({'a': [11, 22]}),
session_capture_name='test_population.py')
assert [1, 2, 3, 4, 5] == pop.five_defaults()
assert [7, 2, 3, 4, 5] == pop.five_defaults(a=7)
assert [1, 7, 3, 4, 5] == pop.five_defaults(b=7)
assert [1, 2, 7, 4, 5] == pop.five_defaults(c=7)
assert [1, 2, 3, 7, 5] == pop.five_defaults(d=7)
assert [1, 2, 3, 4, 7] == pop.five_defaults(e=7)
assert [7, 2, 8, 4, 9] == pop.five_defaults(a=7, c=8, e=9)
assert [1, 7, 3, 8, 5] == pop.five_defaults(b=7, d=8)
assert [1, 7, 8, 9, 5] == pop.five_defaults(b=7, c=8, d=9)
@pm.population_method(population_name=[0])
def hasvarargs(pname, _u, pop=None, *args):
return (pname, pop, len(args))
@pm.population_method(population_name=[1, 'pop'])
def hasvarkw(_u, pname, pop=None, **kwargs):
return (pname, pop, len(kwargs))
@pm.population_method(population_name=1)
def hasboth_arg(_u, pname, _v, pop=None, *args, **kwargs):
return (pname, pop, len(args), len(kwargs))
@pm.population_method(population_name=[1, 'pop'])
def hasboth_argkwarg(_u, pname, _v, pop=None, *args, **kwargs):
return (pname, pop, len(args), len(kwargs))
@pm.population_method(population_name=[1])
def hasboth_haspop(_u, pname, _v, pop='fizz', *args, **kwargs):
return (pname, pop, len(args), len(kwargs))
def test_variable_arglengths():
pop = Population('foo', df=pandas.DataFrame({'a': [11, 22]}),
session_capture_name='test_population.py')
spec = pm.compile_argspec_transforms(hasvarargs, {'population_name': 0})
assert ['_u'] == spec['required_names']
assert 2 == len(spec['required_transformers'])
assert ['pop'] == spec['optional_names']
assert 1 == len(spec['optional_transformers'])
xfm = spec['optional_transformers']['pop']
assert None == xfm(pop, {})
assert 7 == xfm(pop, {'pop': 7})
assert None == xfm(pop, {'zip': 7})
assert 'args' == spec['varargs']
assert None == spec['varkw']
assert ('foo', 1, 3) == pop.hasvarargs("U", 1, 2, 3, 4)
assert ('foo', 'pip', 3) == pop.hasvarkw("U", pop='pip', a=1, b=2, c=3)
assert ('foo', None, 0, 0) == pop.hasboth_arg('U', 'V')
assert ('foo', 'W', 1, 2) == pop.hasboth_arg("U", "V", "W", "X",
y="Y", z="Z")
assert ('foo', 'foo', 0, 0) == pop.hasboth_argkwarg('U', 'V')
assert ('foo', 'W', 1, 2) == pop.hasboth_argkwarg("U", "V", "W", "X",
y="Y", z="Z")
assert ('foo', 'fizz', 0, 0) == pop.hasboth_haspop('U', 'V')
with pytest.raises(TypeError) as exc:
pop.hasvarargs("U", 1, 2, 3, 4, pop='pip')
assert "multiple values for keyword argument 'pop'" in str(exc.value)
with pytest.raises(TypeError):
pop.hasvarargs("U", 1, 2, a=1, b=2)
with pytest.raises(TypeError):
pop.hasvarkw("U", 1, 2, 3, 4, 5, 6, 7)
with pytest.raises(TypeError):
pop.hasboth_arg()
with pytest.raises(TypeError):
pop.hasboth_argkwarg()
# Can only fill named parameters with population-implicit values.
with pytest.raises(IndexError):
@pm.population_method(population_name='0')
def cannot_fill_unnamed_kwargs(*args, **kwargs):
pass
with pytest.raises(IndexError):
@pm.population_method(population_name='pop')
def cannot_fill_unnamed_args(*args, **kwargs):
pass
| apache-2.0 |
Eric89GXL/scipy | scipy/optimize/_lsq/least_squares.py | 5 | 37709 | """Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
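# Informal illustration of the rho convention used above (a note for readers,
# not part of the public API): for loss='soft_l1' and f_scale=1.0 the returned
# loss_function(f) yields a (3, m) array rho with
#     rho[0] = 2 * ((1 + f**2)**0.5 - 1)      # loss values
#     rho[1] = (1 + f**2)**-0.5               # d rho / d z, with z = f**2
#     rho[2] = -0.5 * (1 + f**2)**-1.5        # d^2 rho / d z^2
# and loss_function(f, cost_only=True) returns 0.5 * np.sum(rho[0]).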
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar. If the
argument ``x`` is complex or the function ``fun`` returns complex
residuals, it must be wrapped in a real function of real arguments,
as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
twice as many operations as '2-point' (default). The scheme 'cs'
uses complex steps, and while potentially the most accurate, it is
applicable only when `fun` correctly handles complex inputs and
can be analytically continued to the complex plane. Method 'lm'
always uses the '2-point' scheme. If callable, it is used as
``jac(x, *args, **kwargs)`` and should return a good approximation
(or the exact value) for the Jacobian as an array_like (np.atleast_2d
is applied), a sparse matrix or a `scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
The exact condition depends on a `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers
influence, but may cause difficulties in optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
        difference estimation; its shape must be (m, n). If the Jacobian has
        only a few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (m,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
Levenberg-Marquadt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on paper [JJMore]_, it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into bounds
and efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm works quite robustly in
    unbounded and bounded problems, and is thus chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, while
``x[0]`` is left unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-dimensional complex function of n complex
variables we optimize a 2m-dimensional real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
| bsd-3-clause |
wilsonkichoi/zipline | zipline/data/benchmarks.py | 8 | 2037 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from six.moves.urllib_parse import urlencode
def format_yahoo_index_url(symbol, start_date, end_date):
"""
Format a URL for querying Yahoo Finance for Index data.
"""
return (
'http://ichart.finance.yahoo.com/table.csv?' + urlencode({
's': symbol,
# start_date month, zero indexed
'a': start_date.month - 1,
# start_date day
'b': start_date.day,
# start_date year
'c': start_date.year,
# end_date month, zero indexed
'd': end_date.month - 1,
# end_date day
'e': end_date.day,
# end_date year
'f': end_date.year,
# daily frequency
'g': 'd',
})
)
def get_benchmark_returns(symbol, start_date, end_date):
"""
Get a Series of benchmark returns from Yahoo.
Returns a Series with returns from (start_date, end_date].
start_date is **not** included because we need the close from day N - 1 to
compute the returns for day N.
"""
return pd.read_csv(
format_yahoo_index_url(symbol, start_date, end_date),
parse_dates=['Date'],
index_col='Date',
usecols=["Adj Close", "Date"],
squeeze=True, # squeeze tells pandas to make this a Series
# instead of a 1-column DataFrame
).sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
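# Illustrative usage sketch (it assumes the Yahoo Finance endpoint above is
# still reachable; the symbol and the dates are arbitrary example values):
if __name__ == '__main__':
    import datetime
    returns = get_benchmark_returns(
        '^GSPC',
        datetime.datetime(2013, 1, 1),
        datetime.datetime(2013, 6, 30),
    )
    # `returns` is a UTC-localized Series of daily percentage changes; the
    # first day is dropped because day N needs the close of day N - 1.
    print(returns.head())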
| apache-2.0 |
avmarchenko/exatomic | exatomic/molcas/tests/test_output.py | 3 | 8073 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
import numpy as np
import pandas as pd
from unittest import TestCase
from exatomic import Universe
from exatomic.base import resource
from exatomic.molcas.output import Output, Orb, HDF
# TODO : change df.shape[0] == num to len(df.index) == num everywhere
class TestOutput(TestCase):
"""Test the Molcas output file editor."""
def setUp(self):
self.cdz = Output(resource('mol-carbon-dz.out'))
self.uo2sp = Output(resource('mol-uo2-anomb.out'))
self.mamcart = Output(resource('mol-ch3nh2-631g.out'))
self.mamsphr = Output(resource('mol-ch3nh2-anovdzp.out'))
self.c2h6 = Output(resource('mol-c2h6-basis.out'))
def test_add_orb(self):
"""Test adding orbital file functionality."""
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.assertTrue(hasattr(self.mamcart, 'momatrix'))
self.assertTrue(hasattr(self.mamcart, 'orbital'))
with self.assertRaises(ValueError):
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'))
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='same')
self.assertTrue('same' in self.mamcart.momatrix.columns)
self.assertTrue('same' in self.mamcart.orbital.columns)
self.mamcart.add_orb(resource('mol-ch3nh2-631g.scforb'),
mocoefs='diff', orbocc='diffocc')
self.assertTrue('diff' in self.mamcart.momatrix.columns)
self.assertTrue('diffocc' in self.mamcart.orbital.columns)
uni = self.mamcart.to_universe()
self.assertTrue(hasattr(uni, 'momatrix'))
self.assertTrue(hasattr(uni, 'orbital'))
def test_add_overlap(self):
"""Test adding an overlap matrix."""
self.cdz.add_overlap(resource('mol-carbon-dz.overlap'))
self.assertTrue(hasattr(self.cdz, 'overlap'))
uni = self.cdz.to_universe()
self.assertTrue(hasattr(uni, 'overlap'))
def test_parse_atom(self):
"""Test the atom table parser."""
self.uo2sp.parse_atom()
self.assertEqual(self.uo2sp.atom.shape[0], 3)
self.assertTrue(np.all(pd.notnull(self.uo2sp.atom)))
self.mamcart.parse_atom()
self.assertEqual(self.mamcart.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mamcart.atom)))
self.mamsphr.parse_atom()
self.assertEqual(self.mamsphr.atom.shape[0], 7)
self.assertTrue(np.all(pd.notnull(self.mamsphr.atom)))
def test_parse_basis_set_order(self):
"""Test the basis set order table parser."""
self.uo2sp.parse_basis_set_order()
self.assertEqual(self.uo2sp.basis_set_order.shape[0], 69)
self.assertTrue(np.all(pd.notnull(self.uo2sp.basis_set_order)))
self.mamcart.parse_basis_set_order()
self.assertEqual(self.mamcart.basis_set_order.shape[0], 28)
self.assertTrue(np.all(pd.notnull(self.mamcart.basis_set_order)))
self.mamsphr.parse_basis_set_order()
self.assertEqual(self.mamsphr.basis_set_order.shape[0], 53)
self.assertTrue(np.all(pd.notnull(self.mamsphr.basis_set_order)))
def test_parse_basis_set(self):
"""Test the gaussian basis set table parser."""
self.uo2sp.parse_basis_set()
self.assertEqual(self.uo2sp.basis_set.shape[0], 451)
self.assertTrue(np.all(pd.notnull(self.uo2sp.basis_set)))
self.mamcart.parse_basis_set()
self.assertEqual(self.mamcart.basis_set.shape[0], 84)
self.assertTrue(np.all(pd.notnull(self.mamcart.basis_set)))
self.mamsphr.parse_basis_set()
self.assertEqual(self.mamsphr.basis_set.shape[0], 148)
self.assertTrue(np.all(pd.notnull(self.mamsphr.basis_set)))
self.c2h6.parse_basis_set()
self.assertTrue(hasattr(self.c2h6, 'basis_set'))
def test_to_universe(self):
"""Test that the Outputs can be converted to universes."""
uni = self.uo2sp.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamcart.to_universe()
self.assertIs(type(uni), Universe)
uni = self.mamsphr.to_universe()
self.assertIs(type(uni), Universe)
class TestOrb(TestCase):
"""Test the Molcas Orb file parser."""
def test_parse_old_uhf(self):
sym = Orb(resource('mol-c2h6-old-sym.uhforb'))
nym = Orb(resource('mol-c2h6-old-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_old_orb(self):
sym = Orb(resource('mol-c2h6-old-sym.scforb'))
nym = Orb(resource('mol-c2h6-old-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_uhf(self):
sym = Orb(resource('mol-c2h6-sym.uhforb'))
nym = Orb(resource('mol-c2h6-nosym.uhforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_orb(self):
sym = Orb(resource('mol-c2h6-sym.scforb'))
nym = Orb(resource('mol-c2h6-nosym.scforb'))
sym.parse_momatrix()
nym.parse_momatrix()
self.assertTrue(sym.momatrix.shape[0] == 274)
self.assertTrue(nym.momatrix.shape[0] == 900)
def test_parse_momatrix(self):
"""Test the momatrix table parser."""
uo2sp = Orb(resource('mol-uo2-anomb.scforb'))
uo2sp.parse_momatrix()
self.assertEqual(uo2sp.momatrix.shape[0], 4761)
self.assertTrue(np.all(pd.notnull(uo2sp.momatrix)))
self.assertTrue(np.all(pd.notnull(uo2sp.orbital)))
mamcart = Orb(resource('mol-ch3nh2-631g.scforb'))
mamcart.parse_momatrix()
self.assertEqual(mamcart.momatrix.shape[0], 784)
self.assertTrue(np.all(pd.notnull(mamcart.momatrix)))
self.assertTrue(np.all(pd.notnull(mamcart.orbital)))
mamsphr = Orb(resource('mol-ch3nh2-anovdzp.scforb'))
mamsphr.parse_momatrix()
self.assertEqual(mamsphr.momatrix.shape[0], 2809)
self.assertTrue(np.all(pd.notnull(mamsphr.momatrix)))
self.assertTrue(np.all(pd.notnull(mamsphr.orbital)))
try:
import h5py
class TestHDF(TestCase):
def setUp(self):
self.nym = HDF(resource('mol-c2h6-nosym-scf.hdf5'))
self.sym = HDF(resource('mol-c2h6-sym-scf.hdf5'))
def test_parse_atom(self):
self.sym.parse_atom()
self.nym.parse_atom()
self.assertTrue(self.sym.atom.shape[0] == 8)
self.assertTrue(self.nym.atom.shape[0] == 8)
def test_parse_basis_set_order(self):
self.sym.parse_basis_set_order()
self.nym.parse_basis_set_order()
self.assertTrue(self.sym.basis_set_order.shape[0] == 30)
self.assertTrue(self.nym.basis_set_order.shape[0] == 30)
def test_parse_orbital(self):
self.sym.parse_orbital()
self.nym.parse_orbital()
self.assertTrue(self.sym.orbital.shape[0] == 30)
self.assertTrue(self.nym.orbital.shape[0] == 30)
def test_parse_overlap(self):
self.sym.parse_overlap()
self.nym.parse_overlap()
self.assertTrue(self.sym.overlap.shape[0])
self.assertTrue(self.nym.overlap.shape[0])
def test_parse_momatrix(self):
self.sym.parse_momatrix()
self.nym.parse_momatrix()
self.assertTrue(self.nym.momatrix.shape[0] == 900)
with self.assertRaises(AttributeError):
self.assertTrue(self.sym.momatrix)
def test_to_universe(self):
self.sym.to_universe()
self.nym.to_universe()
except ImportError:
pass
| apache-2.0 |
hdmetor/scikit-learn | examples/plot_multilabel.py | 87 | 4279 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
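# A minimal NumPy sketch of the generative process described in the module
# docstring (illustrative only; the data plotted below actually comes from
# make_multilabel_classification, and this helper, with its made-up name and
# parameters, is not used anywhere else):
def _sample_document(n_classes=2, n_labels=1, length=50, rng=np.random):
    # pick the number of labels: n ~ Poisson(n_labels), rejecting n > n_classes
    n = rng.poisson(n_labels)
    while n > n_classes:
        n = rng.poisson(n_labels)
    # n times, choose a class c (theta taken as uniform here), rejecting repeats
    labels = set()
    while len(labels) < n:
        labels.add(rng.randint(n_classes))
    # pick the document length: k ~ Poisson(length), rejecting zero-length docs
    k = rng.poisson(length)
    while k == 0:
        k = rng.poisson(length)
    return k, sorted(labels)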
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
ephes/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version of the
least-squares function. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/reshape/merge/test_merge_ordered.py | 2 | 3638 | from numpy import nan
import pytest
import pandas as pd
from pandas import DataFrame, merge_ordered
from pandas.util.testing import assert_frame_equal
class TestMergeOrdered:
def setup_method(self, method):
self.left = DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]})
self.right = DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]})
def test_basic(self):
result = merge_ordered(self.left, self.right, on="key")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1, nan, 2, nan, 3, nan],
"rvalue": [nan, 1, 2, 3, nan, 4],
}
)
assert_frame_equal(result, expected)
def test_ffill(self):
result = merge_ordered(self.left, self.right, on="key", fill_method="ffill")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1.0, 1, 2, 2, 3, 3.0],
"rvalue": [nan, 1, 2, 3, 3, 4],
}
)
assert_frame_equal(result, expected)
def test_multigroup(self):
left = pd.concat([self.left, self.left], ignore_index=True)
left["group"] = ["a"] * 3 + ["b"] * 3
result = merge_ordered(
left, self.right, on="key", left_by="group", fill_method="ffill"
)
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"] * 2,
"lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
"rvalue": [nan, 1, 2, 3, 3, 4] * 2,
}
)
expected["group"] = ["a"] * 6 + ["b"] * 6
assert_frame_equal(result, expected.loc[:, result.columns])
result2 = merge_ordered(
self.right, left, on="key", right_by="group", fill_method="ffill"
)
assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, self.right, on="key", left_by="group")
assert result["group"].notna().all()
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on="key")
assert isinstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat),
]
for df_seq, pattern in test_cases:
with pytest.raises(ValueError, match=pattern):
pd.concat(df_seq)
pd.concat([pd.DataFrame()])
pd.concat([None, pd.DataFrame()])
pd.concat([pd.DataFrame(), None])
def test_doc_example(self):
left = DataFrame(
{
"group": list("aaabbb"),
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3] * 2,
}
)
right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
result = merge_ordered(left, right, fill_method="ffill", left_by="group")
expected = DataFrame(
{
"group": list("aaaaabbbbb"),
"key": ["a", "b", "c", "d", "e"] * 2,
"lvalue": [1, 1, 2, 2, 3] * 2,
"rvalue": [nan, 1, 2, 3, 3] * 2,
}
)
assert_frame_equal(result, expected)
| bsd-3-clause |
zehpunktbarron/iOSMAnalyzer | scripts/c7_mappertyp.py | 1 | 3598 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: calculates the total amount and percentage of node contributions by each contributor. Results are grouped by three mapper types: "Senior-Mappers", "Junior-Mappers" and "Nonrecurring-Mappers"
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
from pylab import *
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
	# Establish the connection to the DB using psycopg2
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# New cursor method for sql
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
SELECT
-- Senior Mapper
(SELECT
COUNT(user_name)
FROM
(SELECT
user_name,
COUNT(user_name) AS edits_absolut
FROM
hist_point
WHERE
visible = 'true'
GROUP BY
user_name
) as foo1
WHERE
edits_absolut >=1000) AS senior_mappers,
-- Junior Mapper
(SELECT
COUNT(user_name)
FROM
(SELECT
user_name,
COUNT(user_name) AS edits_absolut
FROM
hist_point
WHERE
visible = 'true'
GROUP BY
user_name
) AS foo2
WHERE
edits_absolut <1000 AND edits_absolut >=10) AS junior_mappers,
-- Nonrecurring Mapper
(SELECT
COUNT(user_name)
FROM
(SELECT
user_name,
COUNT(user_name) AS edits_absolut
FROM
hist_point
WHERE
visible = 'true'
GROUP BY
user_name) AS foo3
WHERE
edits_absolut <10) as Nonrecurring_mappers
;
""")
# Return the results of the query. Fetchall() = all rows, fetchone() = first row
records = cur.fetchone()
cur.close()
except:
print "Query could not be executed"
# Get data from query
senior_m = records[0]
junior_m = records[1]
nonrecurring_m = records[2]
# make a square figure and axes
figure(1, figsize=(6,6))
ax = axes([0.2, 0.2, 0.6, 0.6])
# pie-labelling
labels = 'Senior Mappers', 'Junior Mappers', 'Nonrecurring Mappers'
# get db-values as fracs
fracs = [senior_m, junior_m, nonrecurring_m]
# explode values
explode=(0.05, 0.05, 0.05)
# Color in RGB. Not sure about the values (counts). Source: http://stackoverflow.com/questions/5133871/how-to-plot-a-pie-of-color-list
data = {(0, 210, 0): 110, (236, 0, 0): 4, (234, 234, 0): 11} # values in hexa: #2DD700 ,#00A287, #FF6700
colors = []
counts = []
for color, count in data.items():
# matplotlib wants colors as 0.0-1.0 floats, not 0-255 ints
colors.append([float(x)/255 for x in color])
counts.append(count)
# Percentage (and total values)
def my_autopct(pct):
total=sum(fracs)
val=int(pct*total/100.0)
return '{p:.1f}% ({v:d})'.format(p=pct,v=val)
# The pie chart (DB-values, explode pies, Labels, decimal places, add shadows to pies
pie(fracs, explode=explode, colors=colors, autopct=my_autopct, labels=labels, shadow=True)
# Title of the pie chart
title('Mappertypes based on their Node-Contribution')
# Save plot to *.jpeg-file
savefig('pics/c7_mappertyp.jpeg')
plt.clf()
| gpl-3.0 |
njwilson23/scipy | scipy/interpolate/ndgriddata.py | 45 | 7161 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
            Points at which to interpolate data.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
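# Illustrative usage sketch for NearestNDInterpolator (assumes 2-D scattered
# data; the arrays below are arbitrary examples):
#
#     >>> points = np.random.rand(100, 2)
#     >>> values = np.sin(points[:, 0]) + np.cos(points[:, 1])
#     >>> interp = NearestNDInterpolator(points, values)
#     >>> interp(np.array([[0.5, 0.5]]))  # value of the nearest data point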
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
          tessellate the input point set into n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
michaelaye/iuvs | iuvs/apoapse.py | 1 | 4111 | import os
import sys
import numpy as np
import pandas as pd
from . import io
from .exceptions import ApoapseNonUniqueSpectralPixel
def check_endian(data):
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
if data.dtype.byteorder not in ('=', sys_byteorder):
return data.byteswap().newbyteorder(sys_byteorder)
else:
return data
def process_integration_i(i, b, spectral_pix=15):
selector = b.p_alts[i].apply(lambda x: all(~(x > 0)), axis=1)
# take column 4 as it's the pixel center ground coords.
lats = b.p_lats[i][selector][4]
lons = b.p_lons[i][selector][4]
values = b.spec[i][selector][spectral_pix]
return lats, lons, values
def return_and_convert(lats, lons, data):
return lats, lons, data
class Apoapse(object):
def __init__(self, fname, wavelength):
self.fname = fname
self.wavelength = wavelength
self.l1b = io.L1BReader(fname)
# altitude over limb
self.p_alts = self.get_and_check_PixelGeom('PIXEL_CORNER_MRH_ALT')
# for now only use the center pixel coords for lats and lons
self.p_lats = self.get_and_check_PixelGeom('PIXEL_CORNER_LAT')[:, :, 4]
self.p_lons = self.get_and_check_PixelGeom('PIXEL_CORNER_LON')[:, :, 4]
# this would fail if the data array is not 3-dim
self.spec = pd.Panel(check_endian(self.l1b.img))
spec_pix = self.get_spectral_pixel()
# get boolean selector dataframe for pixels on disk
# the all(...) term refers to the fact that I want all 5 coords
# (corners & center) to be on-disk to be chosen.
selector = self.p_alts.apply(lambda x: all(~(x > 0)), axis=2)
self.selector = selector
# values.ravel to go back to 1D numpy arrays
self.lats = self.p_lats[selector].values.ravel()
self.lons = self.p_lons[selector].values.ravel()
self.data = self.spec[:, :, spec_pix][selector].values.ravel()
def get_spectral_pixel(self):
"""return spectral pixels closest to chosen wavelength.
To use only 1 number for all pixel is a cop-out as I could not find
quickly a way to slice a 3D array with a list of indices for matching wavelengths
in case they ever differ (I think so far all pixels have the same wavelengths,
but that might differ in the future).
TODO: Make this work for a list of differing indices for given wavelengths.
"""
idx = (np.abs(self.l1b.wavelengths-self.wavelength)).argmin(axis=1)
if len(set(idx)) > 1:
raise ApoapseNonUniqueSpectralPixel(idx)
return idx[0]
def get_and_check_PixelGeom(self, colname):
return pd.Panel(check_endian(self.l1b.PixelGeometry[colname]))
def process_fnames(fnames, wavelength):
lats = []
lons = []
data = []
for fname in fnames:
print(os.path.basename(fname))
apo = Apoapse(fname, wavelength)
idx = ~np.isnan(apo.lats)
lats.append(apo.lats[idx])
lons.append(apo.lons[idx])
data.append(apo.data[idx])
try:
df = pd.DataFrame({'lats': np.concatenate(lats),
'lons': np.concatenate(lons),
'data': np.concatenate(data)})
except ValueError:
return None
return df
def process_day(daystring, wavelength, channel='muv'):
"""process day of apoapse data
Parameters
----------
daystring: <str>
Usual format of YYYYmmdd (%Y%m%d)
Returns
-------
pd.DataFrame
Also saving the dataframe in ~/to_keep/apoapse
"""
globstr = "apoapse*-{}_{}T".format(channel, daystring)
fnames = io.l1b_filenames(globstr, env='production')
df = process_fnames(fnames, wavelength)
savename = "{}_{}_{}.h5".format(channel, daystring, wavelength)
path = str(io.analysis_out / 'apoapse' / savename)
df.to_hdf(path, 'df')
print('Created {}.'.format(path))
return df
def process_days(list_of_days, wavelength, channel='muv'):
for day in list_of_days:
process_day(day, wavelength, channel)
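# Illustrative usage sketch (assumes the production L1B apoapse files referenced
# by io.l1b_filenames are available; the day strings and the wavelength, given
# in the same units as l1b.wavelengths, are arbitrary example values):
#
#     process_days(['20150404', '20150405'], wavelength=250, channel='muv')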
| isc |
gotomypc/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
jblupus/PyLoyaltyProject | loyalty/cluto.py | 1 | 1250 | import os
from utils import HOME
import json
import numpy as np
import pandas as pd
SUMMARY_PATH = HOME + '/Dropbox/Twitter/Summary/'
CLUTO_PATH = HOME + '/Dropbox/Twitter/Cluto/'
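# to_cluto() below writes a CLUTO sparse-matrix (.mat) file: the first line is
# "<n_rows> <n_cols> <n_nonzeros>", and each following line holds the
# "<interval> <count>" pairs of one row (only non-zero intervals are written).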
def to_cluto(filename):
jd = json.load(open(SUMMARY_PATH + filename + '.jsons', 'r'))
keys = jd['summary']
c = 0
cols = 0
for key in keys:
intervals = np.array(keys[key]['intervals'].keys())
c += np.size(intervals[intervals != '0'])
_max = np.max(intervals.astype(int))
cols = _max if _max > cols else cols
print len(keys), cols, c
with open(CLUTO_PATH + filename + '.mat', 'wb') as outfile:
outfile.write(str(len(keys)) + ' ' + str(cols) + ' ' + str(c) +'\n')
for key in keys:
intervals = np.array(keys[key]['intervals'].keys()).astype(int)
intervals = np.sort(filter(lambda x: x != 0, intervals))
vals = [str(i) + ' ' + str(keys[key]['intervals'][str(i)]) for i in intervals]
outfile.write(' '.join(vals) + '\n')
ids = pd.DataFrame()
ids['id'] = keys
ids.to_csv(CLUTO_PATH + 'ids_' + filename + '.csv')
files = set()
for l in os.listdir(SUMMARY_PATH):
files.add(l.split('.')[0])
for f in files:
to_cluto(f)
break
| bsd-2-clause |
L3nzian/AutoPermit | checkForMonitorReqs.py | 1 | 5100 | # -*- coding: utf-8 -*-
#This module runs daily. It looks for permits whose monitoring data should
#already have come in, and emails the relevant user and Marilyn a reminder to
#check for that data
import matplotlib
import win32com.client
import datetime
import os
import re
import getpass
import odbc
import types
import time
import _winreg  # used by the Outlook security registry tweaks further below
CurrentDate = str(datetime.date.today())
date_format = "%Y-%m-%d"
CURDATE = datetime.datetime.fromtimestamp(time.mktime(time.strptime(CurrentDate, date_format)))
RecordsToEmail = []
conn = odbc.odbc("WaterQuality")
cursor = conn.cursor()
sqlSearch = str("select RecordNo,MonitorCheckDate,EmailSentDate from FinalPermitsTable")
cursor.execute(sqlSearch)
results = cursor.fetchall()
if type(results) is not types.NoneType:
if len(results) > 0:
i = 0
while i < len(results):
MONCHECKDATE = datetime.datetime.fromtimestamp(time.mktime(time.strptime(results[i][1], date_format)))
if CURDATE >= MONCHECKDATE and results[i][2] == "":
RecordsToEmail.append(results[i][0])
i+=1
if len(RecordsToEmail) > 0:
for each in RecordsToEmail:
sqlSearch = str("select SessionNo,AppNo,PermitNo,UserName,PublishedDate,MonitorCheckDate from FinalPermitsTable where RecordNo = '"+str(each)+"'")
cursor.execute(sqlSearch)
result = cursor.fetchone()
SessionNo = result[0]
AppNo = result[1]
PermitNo = result[2]
UserName = result[3]
PublishedDate = result[4]
MonitorCheckDate = result[5]
emailSubject = "6 month Reminder for Monitoring Requirements for permit "+str(PermitNo)+". "
emailBody = "Records indicate it has been 6 months since permit "+str(PermitNo)+" was approved via application "+str(AppNo)+" through Session "+str(SessionNo)+". "
emailBody+= "The permit was published on "+str(PublishedDate)+" and the Monitoring data was scheduled to be entered 6 months later on "+str(MonitorCheckDate)+". "
emailBody+= "Monitoring data should be available for this permit. To check monitoring data, open the Water Quality Software, enter the permit number, and check the Monitoring Tabs for appropriate monitoring data."
if UserName.upper() == "RYB":
recipient = "[email protected]"
elif UserName.upper() == "MFS":
recipient = "[email protected]"
elif UserName.upper() == "WWW":
recipient = "[email protected]"
elif UserName.upper() == "CAB":
recipient = "[email protected]"
elif UserName.upper() == "ELC":
recipient = "[email protected]"
elif UserName.upper() == "JKW":
recipient = "[email protected]"
elif UserName.upper() == "GJO":
recipient = "joey.o'[email protected]"
elif UserName.upper() == "STW":
recipient = "[email protected]"
else:
recipient = "[email protected]"
outlook = win32com.client.Dispatch("Outlook.Application")
outlook.Session.Logon
keyVal = r'Software\Microsoft\Office\12.0\Outlook\Security'
try:
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,keyVal,0,_winreg.KEY_ALL_ACCESS)
except:
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,keyVal)
#print _winreg.QueryValueEx(key, "ObjectModelGuard")
_winreg.SetValueEx(key,"ObjectModelGuard",0,_winreg.REG_DWORD,2)
#print _winreg.QueryValueEx(key, "ObjectModelGuard")
except:
pass
outlook.Session.Logoff
outlook.Session.Logon
keyVal = r'Software\Microsoft\Office\12.0\Outlook\Security'
try:
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,keyVal,0,_winreg.KEY_ALL_ACCESS)
except:
key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE,keyVal)
#print _winreg.QueryValueEx(key, "ObjectModelGuard")
_winreg.SetValueEx(key,"ObjectModelGuard",0,_winreg.REG_DWORD,2)
#print _winreg.QueryValueEx(key, "ObjectModelGuard")
except:
pass
email = outlook.CreateItem(0)
ReviewerEmailString = "[email protected];[email protected];joey.o'[email protected];[email protected];[email protected];[email protected];[email protected];[email protected]"
ReviewerEmailString = "[email protected]"
email.To = recipient
email.CC = "[email protected]"
email.BCC = ""
email.Subject = emailSubject
email.Body = emailBody
AddSig = email.GetInspector()
email.Send()
sqlSearch = str("update FinalPermitsTable set EmailSentDate = '"+str(CurrentDate)+"' where RecordNo = '"+str(each)+"'")
cursor.execute(sqlSearch)
| gpl-2.0 |
cgre-aachen/gempy | examples/examples/geometries/7_combination.py | 1 | 1682 | """
Model 7 - Combination
======================
"""
# %%
# A folded domain featuring an unconformity and a fault. We start by importing
# the necessary dependencies:
#
# %%
# Importing GemPy
import gempy as gp
import pandas as pd
pd.set_option('precision', 2)
# %%
# Creating the model by importing the input data and displaying it:
#
# %%
data_path = 'https://raw.githubusercontent.com/cgre-aachen/gempy_data/master/'
path_to_data = data_path + "/data/input_data/jan_models/"
geo_data = gp.create_data('combination',
extent=[0, 2500, 0, 1000, 0, 1000],
resolution=[125, 50, 50],
path_o=path_to_data + "model7_orientations.csv",
path_i=path_to_data + "model7_surface_points.csv")
# %%
geo_data.get_data()
# %%
# Setting and ordering the units and series:
#
# %%
gp.map_stack_to_surfaces(geo_data, {"Fault_Series": ('fault'), "Strat_Series1": ('rock3'),
"Strat_Series2": ('rock2','rock1'),
"Basement_Series":('basement')})
geo_data.set_is_fault(['Fault_Series'])
# %%
gp.plot_2d(geo_data, direction='y')
# %%
# Calculating the model:
#
# %%
interp_data = gp.set_interpolator(geo_data, theano_optimizer='fast_compile')
# %%
sol = gp.compute_model(geo_data)
# %%
# Displaying the result in x and y direction:
#
# %%
gp.plot_2d(geo_data, cell_number=5,
direction='y', show_data=False, show_boundaries=True)
# %%
# sphinx_gallery_thumbnail_number = 2
gp.plot_2d(geo_data, cell_number=5,
direction='x', show_data=True)
# %%
gp.plot_3d(geo_data)
gp.save_model(geo_data) | lgpl-3.0 |
paveenju/mlat-sim | main/figure4-2.py | 1 | 2573 | '''
Created on Jan 27, 2017
@author: paveenju
'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import utils.functions as fn
import utils.tdoa_loc as lz
import utils.error_ellipse as ellipse
import matplotlib2tikz.save as tikz_save
# Simulation parameters
L = 1000;
# True target position
U = [5, 70];
# True sensor positions
# R1, R2, R3, R4, R5 = [-5, 8], [4, 6], [-2, 4], [3, 6], [4, 5]
R1, R2, R3, R4 = [-54, 85], [47, 78], [-29, 41], [42, 48]
R = [R1, R2, R3, R4]
# Sensor available
A = [1, 1, 1, 1]
# Noise variables
mu = 0
sigma = 1
N = sigma**2*np.identity(len(R)*(len(R)-1)/2)
#N = sigma**2*np.diag((3,4,5,6,7,8,9,10,11,12))
# Reference position
#X0 = [0.5, 7]
X0 = [4.9543, 69.6798]
# Monte Carlo simulation
X_h = np.mat(np.zeros((2,L)))
X_hn = np.mat(np.zeros((2,L)))
for k in range(0,L):
E = np.random.normal(mu, sigma, len(A))
#E = [0] * len(A)
X_h[:,k], P = lz.tdoa_loc_taylor(U, R, E, A, X0)
X_hn[:,k], Pn = lz.tdoa_loc_taylor(U, R, E, A, X0, N)
#plt.plot(X_h[0,:], X_h[1,:], '.g', mew=2, ms=2)
#plt.plot(X_hn[0,:], X_hn[1,:], '.y', mew=2, ms=2)
print 'X_h=\n', X_h
print 'X_h with N=\n', X_hn
print 'P=\n', P
print 'P with N=\n', Pn
#stations = np.mat(R)
#plt.plot(stations[:,0], stations[:,1], '^k', mew=5, ms=10)
plt.plot(X_h[0,:], X_h[1,:], 'xr', mew=2, ms=10)
plt.plot(U[0], U[1], '.b', mew=1, ms=20 , label='True position')
plt.plot(X_h[0,99], X_h[1,99], 'xr', mew=2, ms=10, label='Estimated position')
# Plot a transparent 2 standard deviation covariance ellipse
points = np.array(X_h.T)
points_n = np.array(X_hn.T)
ellipse.plot_point_cov(points,
nstd=2,
fill=False,
edgecolor='green',
linewidth='2')
#ellipse.plot_point_cov(points_n,
# nstd=2,
# fill=False,
# edgecolor='orange',
# linewidth='2')
#points = np.random.multivariate_normal(mean=(U[0],U[1]), cov=P, size=1000)
#x, y = points.T
#ellipse.plot_point_cov(points,
# nstd=4,
# fill=False,
# edgecolor='green',
# linewidth='2')
#plt.xticks(np.arange(1, 9, 1.0))
#plt.yticks(np.arange(65, 73, 1.0))
plt.axis('equal')
plt.xlabel('X (m)')
plt.ylabel('Y (m)')
plt.legend(numpoints=1, loc='upper right', shadow=True)
#plt.show()
tikz_save('../output/fig4-3.tex') | gpl-3.0 |
Luke035/TensorflowMNIST | scoring_mnist.py | 1 | 5190 | from tensorflow.contrib import learn
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
ROOT_DATA_PATH = '~/tensorflowMNIST/'
TRAIN_FILE_NAME = 'train.csv'
TEST_FILE_NAME = 'test.csv'
MODEL_CHECKPOINT_DIR = "~/mnist_model_dir"
SUBMISSION_FILE = "~/digit_submission2.csv"
SAMPLE = False #Set to TRUE if you want to SAMPLE the trainig set
LEARNING_RATE = 0.001
OPTIMIZER = 'SGD'
STEPS = 10000
BATCH_SIZE = 20
CHECKPOINTS_SECS = 30
VALIDATION_STEPS = 500
EPOCHS = 1
model_params = {"learning_rate": LEARNING_RATE, "optimizer": OPTIMIZER}
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
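# With a 2x2 window, stride 2 in the height/width dimensions and 'SAME'
# padding, this halves the spatial dimensions, e.g. (batch, 28, 28, C)
# -> (batch, 14, 14, C).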
def reshapeDataframe(toReshapeDf, rowDim1, rowDim2):
data_frame_size = len(toReshapeDf)
#Must be casted to np.float32
return toReshapeDf.values.reshape(data_frame_size, rowDim1, rowDim2, 1).astype(np.float32)
def my_model(features, target, mode, params):
'''
    one-hot: converts the target vector into a one-hot matrix
    e.g.: [2,1,0,8]
[
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]
]
'''
print(features.shape)
print(target.shape)
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
print(target.shape)
#Stacking 2 fully connected layers
#features = layers.stack(features, layers.fully_connected, [100, 10])
#First convolutional layer
with tf.variable_scope('conv_layer1'):
features = layers.convolution2d(inputs=features, num_outputs=32, kernel_size=[5,5], data_format='NHWC', activation_fn=tf.nn.relu )
features = layers.max_pool2d(inputs=features, kernel_size=2, stride=2,padding='SAME', data_format='NHWC' )
        #Each image comes out with a number of channels equal to num_outputs
        #(28, 28, 1) -> (28, 28, 32) after the convolution, then (14, 14, 32) after pooling
#Second convolutional layer
with tf.variable_scope('conv_layer2'):
features = layers.convolution2d(inputs=features, num_outputs=64, kernel_size=[5,5], data_format='NHWC', activation_fn=tf.nn.relu )
features = layers.max_pool2d(inputs=features, kernel_size=2, stride=2,padding='SAME', data_format='NHWC' )
# Back to bidimensional space
features = tf.reshape(features, [- 1, 64 * 7 * 7])
#Fully connected layer
with tf.variable_scope('fc_layer1'):
features = layers.fully_connected(features, 1024, activation_fn=tf.nn.relu)
#Dropout
with tf.variable_scope('dropout'):
features = layers.dropout(features, keep_prob=0.5, is_training=True)
    #Readout layer
with tf.variable_scope('fc_layer2'):
features = layers.fully_connected(features, 10, activation_fn=None)
#Loss function
with tf.variable_scope('loss'):
loss = tf.losses.softmax_cross_entropy(target, features)
with tf.variable_scope('train'):
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer = params["optimizer"],
learning_rate = params["learning_rate"]
)
#Dictionaries
predictions = {
"class": tf.argmax(features, 1)
}
eval_metric_ops = {"accuracy": tf.metrics.accuracy(tf.argmax(target,1), tf.argmax(features,1))}
return model_fn_lib.ModelFnOps(
mode = mode,
predictions = predictions,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops)
#Retrieve test set as panda_df
train_df = pd.read_csv(ROOT_DATA_PATH + TRAIN_FILE_NAME).sample(n=1, replace=False, axis=0)
data_df = train_df[train_df.columns[1:]]
label_df = train_df[train_df.columns[0]]
x_train = reshapeDataframe(data_df, 28, 28)
#Restore last checkpoint (must restore through fit with 0 steps https://github.com/tensorflow/tensorflow/issues/3340)
classifier = learn.Estimator(model_fn=my_model, params=model_params, model_dir=MODEL_CHECKPOINT_DIR)
classifier.fit(x_train, label_df , steps=0)
#Load test dataframe
test_df = pd.read_csv(ROOT_DATA_PATH + TEST_FILE_NAME)
x_test = reshapeDataframe(test_df, 28, 28)
#Predict the test dataframe
predictionList = [i['class'] for i in list(classifier.predict(x_test, batch_size=256))]
#Manipulate the dataframe in order to satisfy the Kaggle submission format
submission_pd = pd.DataFrame(predictionList, columns=['Label'])
submission_pd['ImageId'] = range(1, len(submission_pd) + 1)
to_submit_pd = pd.DataFrame(submission_pd['ImageId'])
to_submit_pd['Label'] = submission_pd['Label']
to_submit_pd.to_csv(path_or_buf=SUBMISSION_FILE, sep=',', index=False)
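# The resulting submission file then starts like this (label values are purely illustrative):
# ImageId,Label
# 1,2
# 2,0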
| mit |
elkeschaper/hts | hts/data_tasks/test/qc_test.py | 1 | 10123 | import numpy as np
import os
import pytest
from hts.plate_data import readout
from hts.plate import plate
from hts.data_tasks import qc_detect_data_issues, qc_knitr, qc_matplotlib
from hts.run import run
# Test file names
TEST_PLATE = [['41092', '43724', '41396', '39340', '39628', '38404', '36288', '39940', '43876', '41504', '44136', '41752', '42672', '43688', '42184', '41928', '44800', '43740', '38072', '37856', '39044', '37980', '39912', '36776'], ['39112', '42112', '43500', '41000', '40884', '43528', '46964', '42512', '44248', '45192', '38340', '43912', '41304', '38288', '45236', '42384', '44440', '43536', '43008', '39776', '39528', '35932', '41536', '32064'], ['45496', '40820', '41100', '46476', '39560', '40920', '46572', '41188', '46276', '44584', '44948', '44428', '44416', '44040', '44108', '41192', '40348', '41884', '40304', '42316', '39980', '40056', '35788', '38100'], ['42628', '38108', '44816', '44224', '40952', '45208', '47668', '39364', '45816', '44892', '44960', '44528', '42480', '43468', '45628', '44096', '40568', '47376', '42268', '37628', '37292', '41044', '39812', '36528'], ['44520', '43944', '37680', '43504', '44516', '37656', '41716', '41936', '46856', '41536', '45652', '42504', '43796', '43164', '41432', '43344', '44960', '41020', '40196', '40288', '37480', '37552', '36744', '36140'], ['37444', '40492', '42452', '46168', '41368', '43644', '44048', '43632', '44840', '41208', '43516', '45000', '44624', '44336', '43580', '41588', '43012', '40368', '37056', '41784', '38008', '35168', '38496', '37740'], ['38764', '41884', '41272', '41160', '42644', '43076', '41184', '44008', '39824', '44064', '47928', '43604', '40460', '43372', '41588', '39540', '42608', '40564', '37880', '39360', '40244', '42352', '40808', '41188'], ['39388', '42016', '39660', '42104', '42592', '41000', '44788', '43292', '43252', '43408', '40436', '39420', '44192', '43356', '38532', '44824', '41924', '43012', '41560', '38920', '39428', '38956', '39060', '38008'], ['42016', '42580', '40560', '41752', '37584', '39072', '37880', '43280', '42448', '42676', '40748', '46412', '40744', '44752', '42548', '43212', '45728', '40896', '36984', '37672', '39920', '38240', '37316', '36176'], ['43012', '41256', '41376', '45172', '41232', '42236', '43852', '44996', '42396', '40532', '41232', '43460', '41312', '41576', '40608', '37192', '41676', '39988', '40780', '37000', '35240', '37900', '40964', '38412'], ['40316', '42424', '40088', '42292', '43820', '35108', '41816', '43744', '41244', '42576', '41028', '44104', '40608', '41892', '39024', '44096', '45260', '36696', '39956', '41856', '38028', '38100', '38832', '38280'], ['38696', '40624', '39880', '40616', '39520', '41776', '40504', '43680', '38960', '44908', '41440', '42988', '39112', '45088', '38560', '40668', '39340', '40632', '39092', '36572', '36496', '37608', '37784', '36784'], ['41056', '47092', '44220', '42096', '41496', '41976', '42152', '40548', '46520', '43788', '39340', '43116', '40908', '42964', '38040', '40668', '42796', '46304', '40736', '38836', '39916', '38680', '39332', '36628'], ['40760', '41172', '40036', '43752', '39276', '43540', '41096', '37604', '42408', '43800', '42364', '47256', '39104', '44436', '40704', '42152', '43900', '43540', '39792', '37140', '41488', '39816', '35396', '36804'], ['43240', '44080', '36664', '37020', '40132', '37444', '39816', '42924', '45404', '40572', '37816', '42344', '43648', '43768', '39628', '38836', '43212', '41588', '38964', '39884', '40308', '40476', '40120', '37996'], ['38036', '39988', '41336', '38140', '40928', '43584', '37888', '41932', '37888', '41396', '38016', '38688', '37364', '42824', '36408', '35100', '39968', '44780', '40648', '33520', '32912', '34748', '37528', '34236']]
TEST_PLATE_DATA_NAME = "test_plate_data_1"
TEST_PLATE2 = [['35904', '38436', '36572', '34976', '34720', '40260', '37960', '36836', '38596', '37520', '38840', '39452', '37096', '41808', '38532', '38364', '35268', '37928', '38188', '43788', '40524', '35444', '36660', '32136'], ['36852', '38076', '41300', '41624', '37672', '39952', '39116', '43628', '42796', '35612', '41504', '42168', '40300', '37984', '40380', '36324', '40672', '39192', '36004', '38192', '36656', '36816', '35280', '35800'], ['35764', '41312', '40572', '40632', '41696', '41092', '37072', '36396', '42052', '45144', '41164', '38624', '43136', '44648', '36852', '42172', '38384', '41660', '39512', '35696', '39568', '34640', '37752', '34460'], ['38976', '36604', '41640', '36520', '36512', '43516', '43996', '39616', '43508', '37828', '40264', '42168', '42264', '40964', '40632', '38176', '38008', '37600', '42368', '35336', '37560', '40500', '39448', '35296'], ['37052', '39644', '40644', '41500', '36232', '38576', '35612', '37468', '44124', '41296', '44080', '42700', '38728', '40148', '37468', '37112', '37804', '38304', '39124', '39664', '38164', '39600', '39660', '38476'], ['40240', '39652', '36912', '38168', '37832', '39740', '35612', '38584', '40128', '41392', '41604', '42084', '40472', '41388', '36432', '40448', '37944', '39688', '37836', '36992', '39744', '33880', '40936', '37272'], ['35648', '38012', '39776', '41592', '37208', '38916', '40764', '41180', '42012', '40216', '38608', '38916', '39528', '39508', '37616', '39320', '41228', '40792', '42560', '39092', '38640', '38848', '36572', '34072'], ['35512', '42116', '38736', '36336', '36708', '44028', '38796', '39924', '42160', '38216', '41256', '40692', '40848', '38296', '40324', '34296', '35076', '35496', '39036', '35168', '42352', '39352', '35236', '35748'], ['37544', '37368', '41456', '37176', '38484', '42068', '39260', '37128', '40676', '38060', '36096', '39856', '38672', '40152', '39132', '36032', '39444', '38912', '39588', '41600', '36584', '35372', '38664', '34564'], ['38948', '36652', '41880', '37276', '32792', '41304', '36700', '43524', '36028', '39196', '36824', '35240', '38620', '35696', '39884', '41860', '40136', '38212', '40092', '40064', '35284', '36972', '37272', '38692'], ['38672', '37260', '35948', '38024', '39148', '39376', '41644', '36740', '39948', '38180', '41576', '36252', '39396', '40496', '39192', '38872', '39712', '39064', '37672', '38360', '40980', '37820', '39020', '36076'], ['38760', '38500', '35804', '37224', '36472', '38140', '39416', '38244', '39516', '39220', '39472', '42396', '41340', '41140', '37048', '36104', '37596', '35708', '38652', '38952', '36896', '37728', '33708', '34252'], ['36668', '36644', '37440', '40568', '37304', '40248', '33352', '40756', '40544', '42508', '39616', '41584', '35860', '38328', '39284', '40612', '37988', '37404', '37196', '36132', '40120', '36848', '36764', '37204'], ['38700', '38788', '38644', '38404', '36208', '38768', '42368', '43348', '35972', '39348', '39468', '42156', '39336', '42684', '36400', '36420', '40008', '38384', '37616', '34824', '36784', '39424', '37864', '37172'], ['33752', '39016', '39412', '43360', '36772', '38040', '37168', '39888', '39700', '40028', '40624', '37896', '36884', '44620', '40552', '35896', '35236', '35756', '37352', '39692', '35056', '33960', '41580', '39072'], ['35000', '37800', '37160', '36280', '34776', '37636', '37664', '37756', '37800', '34920', '38676', '37260', '41132', '40540', '37292', '40724', '36516', '35068', '38052', '36460', '35100', '37428', '35612', '36012']]
TEST_PLATE_DATA_NAME2 = "test_plate_data_2"
TEST_DATA = {TEST_PLATE_DATA_NAME: TEST_PLATE, TEST_PLATE_DATA_NAME2: TEST_PLATE2}
TEST_PLATE_NAME = "test_plate"
TEST_RUN_CONFIG_SIRNA = "run_config_siRNA_1.txt"
TEST_RUN_CONFIG_GLO = "run_config_siRNA_2.txt"
notfixed = pytest.mark.notfixed
@pytest.fixture
def path():
"""Return the path to the test data files.
"""
return os.path.join(os.path.abspath('.'), '../', 'test_data')
@pytest.mark.no_external_software_required
def test_qc_knitr_report(path):
test_run = run.Run.create(origin="config", path=os.path.join(path, "Runs", TEST_RUN_CONFIG_SIRNA))
test_qc_result_path = os.path.join(path, "QC", "qc_test")
test_qc_helper_methods_path = os.path.join(os.path.abspath('.'), "qc", "qc_helper_methods.R")
test_qc_methods = {}
qc_knitr.create_report(run=test_run,
path=test_qc_result_path,
qc_helper_methods_path=test_qc_helper_methods_path,
methods=test_qc_methods,
                           config_data=None,
                           knit_html=True)
assert os.path.isfile(os.path.join(test_qc_result_path, "qc_report.html"))
@notfixed
@pytest.mark.no_external_software_required
def test_create_basic_heatmap(path):
test_readout = readout.Readout(data=TEST_DATA)
test_plate = plate.Plate(data={"readout": test_readout}, height=test_readout.height, width=test_readout.width, name=TEST_PLATE_NAME)
ax = qc_matplotlib.heat_map_single(test_plate.readout.data[TEST_PLATE_DATA_NAME][(0,0)])
axes = qc_matplotlib.heat_map_multiple(test_readout)
@pytest.mark.no_external_software_required
def test_create_data_issue_file_glo(path):
test_run = run.Run.create(origin="config", path=os.path.join(path, "Runs", TEST_RUN_CONFIG_GLO))
assert type(test_run) == run.Run
assert len(test_run.plates) == 10
TEST_GLO_READOUT = "realtime-glo_1"
TEST_GLO_CONTROL_SAMPLE_TYPE = "pos"
TEST_GLO_SAMPLE_SAMPLE_TYPE = ["s"]
TEST_DATAISSUE_TAG = "realtime-glo"
TEST_DATA_ISSUE_OUTPUT = os.path.join("Data_issues", "RealTime-Glo")
TEST_DATAISSUE_FILE = os.path.join(path, TEST_DATA_ISSUE_OUTPUT)
test_issues = qc_detect_data_issues.detect_low_cell_viability(run=test_run,
control_readout_tag=TEST_GLO_READOUT,
control_sample_type=TEST_GLO_CONTROL_SAMPLE_TYPE,
controlled_sample_types=TEST_GLO_SAMPLE_SAMPLE_TYPE,
data_issue_tag=TEST_DATAISSUE_TAG,
path=TEST_DATAISSUE_FILE)
assert type(test_issues) == dict
| gpl-2.0 |
Bismarrck/pymatgen | pymatgen/analysis/interface_reactions.py | 4 | 16198 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
from pymatgen import Composition
from pymatgen.analysis.phase_diagram import GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
"""
This module provides a class to generate and analyze interfacial reactions.
"""
__author__ = "Yihan Xiao"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Yihan Xiao"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Aug 15 2017"
class InterfacialReactivity:
"""
An object encompassing all relevant data for interface reactions.
Args:
c1 (Composition): Composition object for reactant 1.
c2 (Composition): Composition object for reactant 2.
pd (PhaseDiagram): PhaseDiagram object or GrandPotentialPhaseDiagram
object built from all elements in composition c1 and c2.
norm (bool): Whether or not the total number of atoms in composition
of reactant will be normalized to 1.
include_no_mixing_energy (bool): No_mixing_energy for a reactant is the
opposite number of its energy above grand potential convex hull. In
cases where reactions involve elements reservoir, this param
determines whether no_mixing_energy of reactants will be included
in the final reaction energy calculation. By definition, if pd is
not a GrandPotentialPhaseDiagram object, this param is False.
pd_non_grand (PhaseDiagram): PhaseDiagram object but not
GrandPotentialPhaseDiagram object built from elements in c1 and c2.
"""
def __init__(self, c1, c2, pd, norm=True, include_no_mixing_energy=False,
pd_non_grand=None):
self.grand = isinstance(pd, GrandPotentialPhaseDiagram)
# if include_no_mixing_energy is True, pd should be a
# GrandPotentialPhaseDiagram object and pd_non_grand should be given.
assert not (include_no_mixing_energy and not self.grand), \
'Please provide grand phase diagram to compute no_mixing_energy!'
assert not (include_no_mixing_energy and not pd_non_grand),\
'Please provide non-grand phase diagram to compute ' \
'no_mixing_energy!'
# Keeps copy of original compositions.
self.c1_original = c1
self.c2_original = c2
# Two sets of composition attributes for two processing conditions:
        # normalization with and without excluding element(s) from reservoir.
self.c1 = c1
self.c2 = c2
self.comp1 = c1
self.comp2 = c2
self.norm = norm
self.pd = pd
if pd_non_grand:
self.pd_non_grand = pd_non_grand
# Factor is the compositional ratio between composition self.c1 and
# processed composition self.comp1. E.g., factor for
# Composition('SiO2') and Composition('O') is 2.0. This factor will
# be used to convert mixing ratio in self.comp1 - self.comp2
# tie line to that in self.c1 - self.c2 tie line.
self.factor1 = 1
self.factor2 = 1
if self.grand:
# Excludes element(s) from reservoir.
self.comp1 = Composition({k: v for k, v in c1.items()
if k not in pd.chempots})
self.comp2 = Composition({k: v for k, v in c2.items()
if k not in pd.chempots})
# Calculate the factors in case where self.grand = True and
# self.norm = True.
factor1 = self.comp1.num_atoms / c1.num_atoms
factor2 = self.comp2.num_atoms / c2.num_atoms
if self.norm:
self.c1 = c1.fractional_composition
self.c2 = c2.fractional_composition
self.comp1 = self.comp1.fractional_composition
self.comp2 = self.comp2.fractional_composition
if self.grand:
# Only when self.grand = True and self.norm = True
# will self.factor be updated.
self.factor1 = factor1
self.factor2 = factor2
# Computes energies for reactants in different scenarios.
if not self.grand:
# Use entry energy as reactant energy if no reservoir is present.
self.e1 = self._get_entry_energy(self.pd, self.comp1)
self.e2 = self._get_entry_energy(self.pd, self.comp2)
else:
if include_no_mixing_energy:
# Computing grand potentials needs compositions containing
# element(s) from reservoir, so self.c1 and self.c2 are used.
self.e1 = self._get_grand_potential(self.c1)
self.e2 = self._get_grand_potential(self.c2)
else:
self.e1 = self.pd.get_hull_energy(self.comp1)
self.e2 = self.pd.get_hull_energy(self.comp2)
def _get_entry_energy(self, pd, composition):
"""
Finds the lowest entry energy for entries matching the composition.
Entries with non-negative formation energies are excluded.
Args:
pd (PhaseDiagram): PhaseDiagram object.
composition (Composition): Composition object that the target
entry should match.
Returns:
The lowest entry energy among entries matching the composition.
"""
candidate = [i.energy_per_atom for i in pd.qhull_entries if
i.composition.fractional_composition ==
composition.fractional_composition]
assert candidate != [], 'The reactant {} has no matching entry with ' \
'negative formation energy!'.format(
composition.reduced_formula)
min_entry_energy = min(candidate)
return min_entry_energy * composition.num_atoms
def _get_grand_potential(self, composition):
"""
Computes the grand potential Phi at a given composition and
chemical potential(s).
E.g., Phi[c, mu_{Li}]= E_{hull}[c] - n_{Li}[c]mu_{Li}.
Args:
composition (Composition): Composition object.
Returns:
Grand potential at a given composition at chemical potential(s).
"""
grand_potential = self._get_entry_energy(self.pd_non_grand,
composition)
grand_potential -= sum([composition[e] * mu
for e, mu in self.pd.chempots.items()])
if self.norm:
# Normalizes energy to the composition excluding element(s)
# from reservoir.
grand_potential /= \
(1 - sum([composition.get_atomic_fraction(e.symbol)
for e, mu in self.pd.chempots.items()]))
return grand_potential
def _get_energy(self, x):
"""
Computes reaction energy at mixing ratio x : (1-x) for
self.comp1 : self.comp2.
Args:
x (float): Mixing ratio x of reactants, a float between 0 and 1.
Returns:
Reaction energy.
"""
return self.pd.get_hull_energy(self.comp1 * x + self.comp2 * (1-x)) - \
self.e1 * x - self.e2 * (1-x)
def _get_reaction(self, x, normalize=False):
"""
Generates balanced reaction at mixing ratio x : (1-x) for
self.comp1 : self.comp2.
Args:
x (float): Mixing ratio x of reactants, a float between 0 and 1.
normalize (bool): Whether or not to normalize the sum of
coefficients of reactants to 1. For not normalized case,
use original reactant compositions in reaction for clarity.
Returns:
Reaction object.
"""
mix_comp = self.comp1 * x + self.comp2 * (1-x)
decomp = self.pd.get_decomposition(mix_comp)
if normalize:
reactant = list(set([self.c1, self.c2]))
else:
# Uses original composition for reactants.
reactant = list(set([self.c1_original, self.c2_original]))
if self.grand:
reactant += [Composition(e.symbol)
for e, v in self.pd.chempots.items()]
product = [Composition(k.name) for k, v in decomp.items()]
reaction = Reaction(reactant, product)
if normalize:
x = self._convert(x, self.factor1, self.factor2)
if x == 1:
reaction.normalize_to(self.c1, x)
else:
reaction.normalize_to(self.c2, 1-x)
return reaction
def get_products(self):
"""
List of formulas of potential products. E.g., ['Li','O2','Mn'].
"""
products = set()
for _, _, _, react in self.get_kinks():
products = products.union(set([k.reduced_formula
for k in react.products]))
return list(products)
def _convert(self, x, factor1, factor2):
"""
Converts mixing ratio x in comp1 - comp2 tie line to that in
c1 - c2 tie line.
Args:
x (float): Mixing ratio x in comp1 - comp2 tie line, a float
between 0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.0.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
"""
return x * factor2 / ((1-x) * factor1 + x * factor2)
def _reverse_convert(self, x, factor1, factor2):
"""
Converts mixing ratio x in c1 - c2 tie line to that in
comp1 - comp2 tie line.
Args:
x (float): Mixing ratio x in c1 - c2 tie line, a float between
0 and 1.
factor1 (float): Compositional ratio between composition c1 and
processed composition comp1. E.g., factor for
Composition('SiO2') and Composition('O') is 2.
factor2 (float): Compositional ratio between composition c2 and
processed composition comp2.
Returns:
Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
"""
return x * factor1 / ((1-x) * factor2 + x * factor1)
def get_kinks(self):
"""
        Finds all the kinks in mixing ratio where reaction products change
along the tie line of composition self.c1 and composition self.c2.
Returns:
Zip object of tuples (index, mixing ratio,
reaction energy, reaction formula).
"""
c1_coord = self.pd.pd_coords(self.comp1)
c2_coord = self.pd.pd_coords(self.comp2)
n1 = self.comp1.num_atoms
n2 = self.comp2.num_atoms
critical_comp = self.pd.get_critical_compositions(self.comp1,
self.comp2)
x_kink, energy_kink, react_kink = [], [], []
if all(c1_coord == c2_coord):
x_kink = [0, 1]
energy_kink = [self._get_energy(x) for x in x_kink]
react_kink = [self._get_reaction(x) for x in x_kink]
else:
for i in reversed(critical_comp):
# Gets mixing ratio x at kinks.
c = self.pd.pd_coords(i)
x = np.linalg.norm(c - c2_coord) / \
np.linalg.norm(c1_coord - c2_coord)
# Modifies mixing ratio in case compositions self.comp1 and
# self.comp2 are not normalized.
x = x * n2 / (n1 + x * (n2 - n1))
# Converts mixing ratio in comp1 - comp2 tie line to that in
# c1 - c2 tie line.
x_converted = self._convert(x, self.factor1, self.factor2)
x_kink.append(x_converted)
# Gets reaction energy at kinks
energy_kink.append(self._get_energy(x))
# Gets balanced reaction at kinks
react_kink.append(self._get_reaction(x))
index_kink = range(1, len(critical_comp)+1)
return zip(index_kink, x_kink, energy_kink, react_kink)
def labels(self):
"""
Returns a dictionary containing kink information:
{index: 'x= mixing_ratio energy= reaction_energy reaction_equation'}.
E.g., {1: 'x= 0.0 energy = 0.0 Mn -> Mn',
2: 'x= 0.5 energy = -15.0 O2 + Mn -> MnO2',
3: 'x= 1.0 energy = 0.0 O2 -> O2'}.
"""
return {j: 'x= ' + str(round(x, 4)) + ' energy = ' +
str(round(energy, 4)) + ' ' + str(reaction)
for j, x, energy, reaction in self.get_kinks()}
def plot(self):
"""
Plots reaction energy as a function of mixing ratio x in
self.c1 - self.c2 tie line using pylab.
Returns:
Pylab object that plots reaction energy as a function of
mixing ratio x.
"""
plt.rcParams['xtick.major.pad'] = '6'
plt.rcParams['ytick.major.pad'] = '6'
plt.rcParams['axes.linewidth'] = 2
npoint = 1000
xs = np.linspace(0, 1, npoint)
# Converts sampling points in self.c1 - self.c2 tie line to those in
# self.comp1 - self.comp2 tie line.
xs_reverse_converted = self._reverse_convert(xs, self.factor1,
self.factor2)
energies = [self._get_energy(x) for x in xs_reverse_converted]
plt.plot(xs, energies, 'k-')
# Marks kinks and minimum energy point.
kinks = self.get_kinks()
        _, x_kink, energy_kink, _ = zip(*kinks)
plt.scatter(x_kink, energy_kink, marker='o', c='blue', s=20)
plt.scatter(self.minimum()[0], self.minimum()[1], marker='*',
c='red', s=300)
# Labels kinks with indices. Labels are made draggable
# in case of overlapping.
for index, x, energy, reaction in kinks:
plt.annotate(
index,
xy=(x, energy), xytext=(5, 30),
textcoords='offset points', ha='right', va='bottom',
arrowprops=dict(arrowstyle='->',
connectionstyle='arc3,rad=0')).draggable()
plt.xlim([-0.05, 1.05])
if self.norm:
plt.ylabel('Energy (eV/atom)')
else:
plt.ylabel('Energy (eV/f.u.)')
plt.xlabel('$x$ in $x$ {} + $(1-x)$ {}'.format(
self.c1.reduced_formula, self.c2.reduced_formula))
return plt
def minimum(self):
"""
Finds the minimum reaction energy E_min and corresponding
mixing ratio x_min.
Returns:
Tuple (x_min, E_min).
"""
return min([(x, energy) for _, x, energy, _ in self.get_kinks()],
key=lambda i: i[1])
def get_no_mixing_energy(self):
"""
Generates the opposite number of energy above grand potential
convex hull for both reactants.
Returns:
[(reactant1, no_mixing_energy1),(reactant2,no_mixing_energy2)].
"""
        assert self.grand, \
'Please provide grand potential phase diagram ' \
'for computing no_mixing_energy!'
energy1 = self.pd.get_hull_energy(self.comp1) - \
self._get_grand_potential(self.c1)
energy2 = self.pd.get_hull_energy(self.comp2) - \
self._get_grand_potential(self.c2)
unit = 'eV/f.u.'
if self.norm:
unit = 'eV/atom'
return [(self.c1_original.reduced_formula +
' ({0})'.format(unit), energy1),
(self.c2_original.reduced_formula +
' ({0})'.format(unit), energy2)]
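# A minimal usage sketch (assuming `entries` is a list of computed entries spanning the
# chemical space of interest; variable names below are illustrative only):
#
#     from pymatgen.analysis.phase_diagram import PhaseDiagram
#     pd = PhaseDiagram(entries)
#     ir = InterfacialReactivity(Composition('MnO2'), Composition('Li'), pd, norm=True)
#     for index, x, energy, rxn in ir.get_kinks():
#         print(index, x, energy, rxn)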
| mit |
alialerwi/LearningRepository | code/introduction.py | 6 | 8189 | from __future__ import division
# at this stage in the book we haven't actually installed matplotlib,
# comment this out if you need to
from matplotlib import pyplot as plt
##########################
# #
# FINDING KEY CONNECTORS #
# #
##########################
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" },
{ "id": 10, "name": "Jen" }
]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
# first give each user an empty list
for user in users:
user["friends"] = []
# and then populate the lists with friendships
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
num_users = len(users)
avg_connections = total_connections / num_users # 2.4
################################
# #
# DATA SCIENTISTS YOU MAY KNOW #
# #
################################
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
def most_common_interests_with(user_id):
return Counter(interested_user_id
                   for interest in interests_by_user_id[user_id]
for interested_user_id in user_ids_by_interest[interest]
if interested_user_id != user_id)
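# e.g. most_common_interests_with(0) -> Counter({9: 3, 1: 2, 8: 1, 5: 1}),
# since user 9 shares Hadoop, Big Data and Java with user 0.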
###########################
# #
# SALARIES AND EXPERIENCE #
# #
###########################
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
def make_chart_salaries_by_tenure():
tenures = [tenure for salary, tenure in salaries_and_tenures]
salaries = [salary for salary, tenure in salaries_and_tenures]
plt.scatter(tenures, salaries)
plt.xlabel("Years Experience")
plt.ylabel("Salary")
plt.show()
# keys are years
# values are the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
def tenure_bucket(tenure):
if tenure < 2: return "less than two"
elif tenure < 5: return "between two and five"
else: return "more than five"
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
#################
# #
# PAID_ACCOUNTS #
# #
#################
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0: return "paid"
elif years_experience < 8.5: return "unpaid"
else: return "paid"
######################
# #
# TOPICS OF INTEREST #
# #
######################
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
if __name__ == "__main__":
print
print "######################"
print "#"
print "# FINDING KEY CONNECTORS"
print "#"
print "######################"
print
print "total connections", total_connections
print "number of users", num_users
print "average connections", total_connections / num_users
print
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print "users sorted by number of friends:"
print sorted(num_friends_by_id,
key=lambda (user_id, num_friends): num_friends, # by number of friends
reverse=True) # largest to smallest
print
print "######################"
print "#"
print "# DATA SCIENTISTS YOU MAY KNOW"
print "#"
print "######################"
print
print "friends of friends bad for user 0:", friends_of_friend_ids_bad(users[0])
print "friends of friends for user 3:", friends_of_friend_ids(users[3])
print
print "######################"
print "#"
print "# SALARIES AND TENURES"
print "#"
print "######################"
print
print "average salary by tenure", average_salary_by_tenure
print "average salary by tenure bucket", average_salary_by_bucket
print
print "######################"
print "#"
print "# MOST COMMON WORDS"
print "#"
print "######################"
print
for word, count in words_and_counts.most_common():
if count > 1:
print word, count
| unlicense |
INTA-Radar/radar-cmd | Testing/Test.py | 1 | 3850 | # -*- coding: utf-8 -*-
import sys
sys.path.extend(['../'])
import matplotlib.cbook
import warnings
import os
os.environ['PYART_QUIET'] = 'True'
warnings.filterwarnings("ignore", category=FutureWarning)
from Procesador.RainbowRadarProcessor import RainbowRadarProcessor
from Procesador.RainbowRadar import RainbowRadar,ZDR,dBZ,uPhiDP,RhoHV,V
from Procesador.Utils import PNG,JPEG
from Procesador.Precipitation import Precipitation
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
files = {
'f_240_ZDR' : [ZDR,'datos/240/2009112306100500ZDR.vol'],
'f_240_dBZ' : [dBZ,'datos/240/2009112306100500dBZ.vol'],
'f_240_uPhiDP' : [uPhiDP,'datos/240/2009112306100500uPhiDP.vol'],
'f_240_RhoHV' : [RhoHV,'datos/240/2009112306100500RhoHV.vol'],
'f_480_dBZ' : [dBZ,'datos/480/2016122000073000dBZ.azi'],
'f_120_RhoHV' : [RhoHV,'datos/120/2009112306135000RhoHV.vol'],
'f_120_uPhiDP' : [uPhiDP,'datos/120/2009112306135000uPhiDP.vol'],
'f_120_dBZ' : [dBZ,'datos/120/2009112306135000dBZ.vol'],
'f_120_ZDR' : [ZDR,'datos/120/2009112306135000ZDR.vol'],
'f_x_dBZ' : [dBZ,'datos/2009112306135000dBZ.vol'],
'f_x_V' : [V,'datos/2015080902143600V.vol'],
'f_400_dBZ' : [dBZ,'datos/2018111211413100dBZ.azi']
}
files_precipitaciones = {
'f_240_P' : [dBZ,'datos/precipitaciones/2009122815300200dBZ.vol']
}
# Precipitation
for name,file in files_precipitaciones.items():
print(name,' ; ',file)
rr = RainbowRadar('',file[1], radarVariable=file[0])
pp = Precipitation(rr)
pp.computePrecipitations(0)
p = RainbowRadarProcessor(rainbowRadar=pp.genRainRainbowRadar())
p.saveImageToFile(imageType=PNG, pathOutput='res/', fileOutput=name,
image_method_params={'level': 0,
'paddingImg': 1}
)
p.saveImageToFile(imageType=JPEG, pathOutput='res/', fileOutput=name)
p.saveImageToFile(imageType=PNG, pathOutput='res/', fileOutput=name + '_simple',
method='simple',
image_method_params={'elevation': 0,
'paddingImg': 1}
)
p.saveImageToFile(imageType=JPEG, pathOutput='res/', fileOutput=name + '_simple',
method='simple',
image_method_params={'elevation': 0,
'paddingImg': 1})
p.saveToGTiff(0, outFilePath='res/', outFileName=name)
p.saveToNETCDF(outFilePath='res/', outFileName=name)
#################################################################
# Simple images
for name,file in files.items():
print(name,' ; ',file)
rr = RainbowRadar('',file[1], radarVariable=file[0])
p = RainbowRadarProcessor(rainbowRadar=rr)
p.saveImageToFile(imageType=PNG, pathOutput='res/', fileOutput=name,
image_method_params = {'level': 0,
'paddingImg':1})
p.saveImageToFile(imageType=JPEG, pathOutput='res/', fileOutput=name,
image_method_params= {'level': 0,
'paddingImg': 1})
p.saveImageToFile(imageType=PNG,pathOutput='res/', fileOutput=name+'_simple', method='simple',
image_method_params= {'elevation': 0,
'paddingImg': 1})
p.saveImageToFile(imageType=JPEG, pathOutput='res/', fileOutput=name+'_simple', method='simple',
image_method_params= {'elevation': 0,
'paddingImg': 1})
p.saveToGTiff(0, outFilePath='res/',outFileName=name)
p.saveToNETCDF(outFilePath='res/',outFileName=name)
| gpl-3.0 |
kcompher/FreeDiscovUI | freediscovery/categorization.py | 1 | 14079 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
import scipy
from scipy.special import logit, expit
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from .base import _BaseWrapper
from .utils import setup_model, _rename_main_thread
from .neighbors import NearestCentroidRanker, NearestNeighborRanker
from .exceptions import (ModelNotFound, WrongParameter, NotImplementedFD, OptionalDependencyMissing)
def explain_binary_categorization(estimator, vocabulary, X_row):
"""Explain the binary categorization results
Parameters
----------
estimator : sklearn.base.BaseEstimator
the binary categorization estimator
(must have a `decision_function` method)
vocabulary : list [n_features]
vocabulary (list of words or n-grams)
X_row : sparse CSR ndarray [n_features]
a row of the document term matrix
"""
if X_row.ndim != 2 or X_row.shape[0] != 1:
        raise ValueError('X_row must be a 2D sparse array, '
                         'with shape (1, N) not {}'.format(X_row.shape))
if X_row.shape[1] != len(vocabulary):
raise ValueError(
'The vocabulary length ({}) does not match '.format(len(vocabulary)) +\
'the number of features in X_row ({})'.format(X_row.shape[1]))
vocabulary_inv = {ind: key for key, ind in vocabulary.items()}
if type(estimator).__name__ == 'LogisticRegression':
coef_ = estimator.coef_
if X_row.shape[1] != coef_.shape[1]:
raise ValueError("Coefficients size {} does not match n_features={}".format(
coef_.shape[1], X_row.shape[1]))
indices = X_row.indices
weights = X_row.data*estimator.coef_[0, indices]
weights_dict = {}
for ind, value in zip(indices, weights):
key = vocabulary_inv[ind]
weights_dict[key] = value
return weights_dict
else:
raise NotImplementedError()
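# A minimal sketch of how the helper above might be used (names are illustrative, not part
# of this module): `clf` is a fitted sklearn LogisticRegression, `vect` a fitted sklearn
# vectorizer and `X` the corresponding document-term matrix.
#
#     weights = explain_binary_categorization(clf, vect.vocabulary_, X[i])
#     # -> {'some_term': signed_contribution, ...}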
class _CategorizerWrapper(_BaseWrapper):
""" Document categorization model
The option `use_hashing=True` must be set for the feature extraction.
Recommended options also include, `use_idf=1, sublinear_tf=0, binary=0`.
Parameters
----------
cache_dir : str
folder where the model will be saved
parent_id : str, optional
dataset id
mid : str, optional
model id
cv_scoring : str, optional, default='roc_auc'
score that is used for Cross Validation, cf. sklearn
cv_n_folds : str, optional
number of K-folds used for Cross Validation
"""
_wrapper_type = "categorizer"
def __init__(self, cache_dir='/tmp/', parent_id=None, mid=None,
cv_scoring='roc_auc', cv_n_folds=3):
super(_CategorizerWrapper, self).__init__(cache_dir=cache_dir,
parent_id=parent_id,
mid=mid, load_model=True)
if mid is not None:
self.le = joblib.load(os.path.join(self.model_dir, mid, 'label_encoder'))
self.cv_scoring = cv_scoring
self.cv_n_folds = cv_n_folds
@staticmethod
def _build_estimator(Y_train, method, cv, cv_scoring, cv_n_folds, **options):
if cv:
#from sklearn.cross_validation import StratifiedKFold
#cv_obj = StratifiedKFold(n_splits=cv_n_folds, shuffle=False)
            cv_obj = cv_n_folds  # temporary hack (due to pickling issues otherwise, this needs to be fixed)
else:
cv_obj = None
_rename_main_thread()
if method == 'LinearSVC':
from sklearn.svm import LinearSVC
if cv is None:
cmod = LinearSVC(**options)
else:
try:
from freediscovery_extra import make_linearsvc_cv_model
except ImportError:
raise OptionalDependencyMissing('freediscovery_extra')
cmod = make_linearsvc_cv_model(cv_obj, cv_scoring, **options)
elif method == 'LogisticRegression':
from sklearn.linear_model import LogisticRegression
if cv is None:
cmod = LogisticRegression(**options)
else:
try:
from freediscovery_extra import make_logregr_cv_model
except ImportError:
raise OptionalDependencyMissing('freediscovery_extra')
cmod = make_logregr_cv_model(cv_obj, cv_scoring, **options)
elif method == 'NearestCentroid':
cmod = NearestCentroidRanker()
elif method == 'NearestNeighbor':
cmod = NearestNeighborRanker()
elif method == 'xgboost':
try:
import xgboost as xgb
except ImportError:
raise OptionalDependencyMissing('xgboost')
if cv is None:
try:
from freediscovery_extra import make_xgboost_model
except ImportError:
raise OptionalDependencyMissing('freediscovery_extra')
cmod = make_xgboost_model(cv_obj, cv_scoring, **options)
else:
try:
from freediscovery_extra import make_xgboost_cv_model
except ImportError:
raise OptionalDependencyMissing('freediscovery_extra')
cmod = make_xgboost_cv_model(cv, cv_obj, cv_scoring, **options)
elif method == 'MLPClassifier':
if cv is not None:
raise NotImplementedFD('CV not supported with MLPClassifier')
from sklearn.neural_network import MLPClassifier
cmod = MLPClassifier(solver='adam', hidden_layer_sizes=10,
max_iter=200, activation='identity', verbose=0)
else:
raise WrongParameter('Method {} not implemented!'.format(method))
return cmod
def train(self, index, y, method='LinearSVC', cv=None):
"""
Train the categorization model
Parameters
----------
index : array-like, shape (n_samples)
document indices of the training set
y : array-like, shape (n_samples)
target class relative to index (string or int)
method : str
the ML algorithm to use (one of "LogisticRegression", "LinearSVC", 'xgboost')
cv : str
use cross-validation
Returns
-------
cmod : sklearn.BaseEstimator
the scikit learn classifier object
Y_train : array-like, shape (n_samples)
training predictions
"""
valid_methods = ["LinearSVC", "LogisticRegression", "xgboost",
"NearestCentroid", "NearestNeighbor"]
if method in ['MLPClassifier']:
raise WrongParameter('method={} is implemented but not production ready. It was disabled for now.'.format(method))
if method not in valid_methods:
raise WrongParameter('method={} is not supported, should be one of {}'.format(
method, valid_methods))
if cv is not None and method in ['NearestNeighbor', 'NearestCentroid']:
raise WrongParameter('Cross validation (cv={}) not supported with {}'.format(
cv, method))
if cv not in [None, 'fast', 'full']:
raise WrongParameter('cv')
d_all = self.pipeline.data
X_train = d_all[index, :]
Y_labels = y
self.le = LabelEncoder()
Y_train = self.le.fit_transform(Y_labels)
cmod = self._build_estimator(Y_train, method, cv, self.cv_scoring, self.cv_n_folds)
mid, mid_dir = setup_model(self.model_dir)
if method == 'xgboost' and not cv:
cmod.fit(X_train, Y_train, eval_metric='auc')
else:
cmod.fit(X_train, Y_train)
joblib.dump(self.le, os.path.join(mid_dir, 'label_encoder'))
joblib.dump(cmod, os.path.join(mid_dir, 'model'))
pars = {
'method': method,
'index': index,
'y': y,
'categories': self.le.classes_
}
pars['options'] = cmod.get_params()
self._pars = pars
joblib.dump(pars, os.path.join(mid_dir, 'pars'))
self.mid = mid
self.cmod = cmod
return cmod, Y_train
def predict(self, chunk_size=5000, kind='probability'):
"""
Predict the relevance using a previously trained model
Parameters
----------
        chunk_size : int
            chunk size
        kind : str
            type of the output in ['decision_function', 'probability'], only affects ML methods.
            The nearest neighbor ranker always returns cosine similarities in any case.
Returns
-------
res : ndarray [n_samples, n_classes]
the score for each class
nn_ind : {ndarray [n_samples, n_classes], None}
the index of the nearest neighbor for each class (when the NearestNeighborRanker is used)
"""
if kind not in ['probability', 'decision_function']:
raise ValueError("Wrong input value kind={}, must be one of ['probability', 'decision_function']".format(kind))
if kind == 'probability':
kind = 'predict_proba'
if self.cmod is not None:
cmod = self.cmod
else:
            raise WrongParameter('The model must be trained first, or sid must be provided '
                                 'to load a previously trained model!')
ds = self.pipeline.data
nn_ind = None
if isinstance(cmod, NearestNeighborRanker):
res, nn_ind = cmod.kneighbors(ds)
elif hasattr(cmod, kind):
res = getattr(cmod, kind)(ds)
elif hasattr(cmod, 'decision_function'):
# and we need predict_proba
res = cmod.decision_function(ds)
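            # map real-valued decision_function scores onto (0, 1) with the logistic sigmoid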
res = expit(res)
elif hasattr(cmod, 'predict_proba'):
# and we need decision_function
res = cmod.predict_proba(ds)
res = logit(res)
else:
raise ValueError('Model {} has neither decision_function nor predict_proba methods!'.format(cmod))
# handle the case of binary categorization
if res.ndim == 1:
if kind == 'decision_function':
res_p = res
res_n = - res
else:
res_p = res
res_n = 1 - res
res = np.hstack((res_n[:,None], res_p[:, None]))
return res, nn_ind
@staticmethod
def to_dict(Y_pred, nn_pred, labels, id_mapping,
max_result_categories=1, sort=False):
"""
Create a nested dictionary result that would be returned by
the REST API given the categorization results
Parameters
----------
Y_pred : ndarray [n_samples, n_categories]
the score for each class
        nn_pred : {ndarray [n_samples, n_classes], None}
the index of the nearest neighbor for each class (when the NearestNeighborRanker is used)
labels : list
list of categories label
id_mapping : a pd.DataFrame
the metadata mapping from freediscovery.ingestion.DocumentIndex.data
max_result_categories : int
the maximum number of categories in the results
sort : bool
sort by the score of the most likely class
"""
if max_result_categories <= 0:
raise ValueError('the max_result_categories={} must be strictly positive'.format(max_result_categories))
# have to cast to object as otherwise we get serializing np.int64 issues...
base_keys = [key for key in id_mapping.columns if key in ['internal_id',
'document_id',
'rendition_id']]
id_mapping = id_mapping[base_keys].set_index('internal_id', drop=True).astype('object')
def sort_func(x):
return x[0]
if nn_pred is not None:
outer_container = zip(Y_pred.tolist(), nn_pred.tolist())
else:
outer_container = ((y_row, None) for y_row in Y_pred.tolist())
res = []
for idx, (Y_row, nn_row) in enumerate(outer_container):
ires = {'internal_id': idx}
ires.update(id_mapping.loc[idx].to_dict())
iscores = []
if nn_row is not None:
# we have nearest neighbors results
for Y_el, nn_el, label_el in sorted(zip(Y_row, nn_row, labels),
key=sort_func, reverse=True)[:max_result_categories]:
iiel = {'score': Y_el, 'internal_id': nn_el, 'category': label_el}
iiel.update(id_mapping.loc[idx].to_dict())
iscores.append(iiel)
else:
# no nearest neighbors available
for Y_el, label_el in sorted(zip(Y_row, labels),
key=sort_func, reverse=True)[:max_result_categories]:
iiel = {'score': Y_el, 'category': label_el}
iscores.append(iiel)
ires['scores'] = iscores
res.append(ires)
if sort:
res = sorted(res, key=lambda x: x['scores'][0]['score'], reverse=True)
return {'data': res}
def _load_pars(self, mid=None):
"""Load model parameters from disk"""
if mid is None:
mid = self.mid
mid_dir = os.path.join(self.model_dir, mid)
pars = super(_CategorizerWrapper, self)._load_pars(mid)
cmod = joblib.load(os.path.join(mid_dir, 'model'))
pars['options'] = cmod.get_params()
return pars
| bsd-3-clause |
tkcroat/Augerquant | Development/QMpixarr_setup_gui_tk.py | 1 | 62791 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 13:41:13 2017
@author: tkc
"""
import os
import tkinter as tk
import tkinter.messagebox as tkmess
from tkinter import filedialog
import matplotlib as mpl # using path, figure, rcParams
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.widgets import Lasso
from matplotlib import path
import pandas as pd
import numpy as np
from PIL import Image, ImageDraw
import shutil, sys, fileinput
# PLOT_SIZE = (8,6)  # alternative size, better on a laptop
PLOT_SIZE = (7,6) # width, height
AESQUANTPARAMFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\AESquantparams.csv'
# Couldn't figure out proper encoding on some .phi file so just open/overwrite existing
AREAFILE='C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Params\\spatial_areas_sample_min.phi'
BEAMDEFLECT='FJS-10kVdeflected' # Setting to deflect beam off sample after quant map
def pixgui():
''' Launcher function for Auger quantmap pixarray creator '''
root = tk.Tk()
root.wm_title("QM pixarray setup")
GUIMain_pixarray(root)
root.mainloop()
return
class GUIMain_pixarray():
''' Main container for plotter, options (at right), and fileloader (bottom)
pass current working directory as default directory '''
def __init__(self, root):
self.root = root
self.root.wm_title("QM pixarray setup")
self.left_frame = tk.Frame(self.root)
self.image_frame = tk.Frame(self.left_frame)
self.image_frame.pack(side=tk.TOP)
self.opts_frame = tk.Frame(self.left_frame)
self.opts_frame.pack(side=tk.BOTTOM)
self.left_frame.pack(side=tk.LEFT)
self.multiplex_frame = tk.Frame(self.root)
self.multiplex_frame.pack(side=tk.LEFT)
self.pixviewer = Pixarrviewer(self.image_frame,self) # root,parent
self.pixopts = Pixarropts(self.opts_frame,self)
self.multparams = Multiplexopts(self.multiplex_frame, self)
# Menubars
self.menubar=tk.Menu(self.root)
filemenu = tk.Menu(self.menubar, tearoff=0)
filemenu.add_command(label="Load image", command=self.load_image)
filemenu.add_command(label="Save QM pixarray",
command=self.pixopts.save_pixarray)
filemenu.add_command(label="Save image", command=lambda:
self.args_popup_menu({'command':'save_image',
'entry':['filename',self.pixopts.pixarrname.get()],
'radio':['imtype',['full field','crop']],
'radio2':['overlay',['outline','grid','none']]}))
filemenu.add_command(label="Exit", command=self.on_quitapp)
self.menubar.add_cascade(label="File", menu=filemenu)
self.root.config(menu=self.menubar)
def on_quitapp(self):
msg = "Quitting:\nUnsaved progress will be lost.\nDo you wish to Continue?"
if tkmess.askokcancel("Quantmap",msg):
self.root.destroy()
def load_image(self):
        ''' Load the base SEM image (jpg or Auger .sem file) into the pixarray viewer '''
fullpath= filedialog.askopenfilename(title='Select sem image',
filetypes=[("jpg file","*.jpg"), ("Auger sem file","*.sem")])
self.pixviewer.load_image(fullpath)
def args_popup_menu(self, kwargs):
''' Menu launched top-level window to get entered args/kwargs entry and
then call GUIrois method (home of QMfile data object and other assorted methods)
kwargs: command - name of desired method
param name & associated value (e.g. kwargs={'filter size':1})
implement radiobuttons using 'radio':['plottype',['scatter','line']]
implement checkbuttons using 'check':['add backfit',True]
implement entries using 'entry':['filter size',1]
'''
def abort():
t.destroy()
def runcmd():
''' run command w/ entered args/kwargs '''
# Construct args, kwargs for method call
myargs={}
for i, (key, val) in enumerate(kwargs.items()):
if key!='command':
myargs.update({val[0]:tkvars[i].get()})
else:
myargs.update({'command':kwargs.get('command')})
self.pixviewer.runcmd(**myargs)
t.destroy()
t = tk.Toplevel(self.root) # open new toplevel window
tkvars=[] # Display and alter params passed in kwargs
# Key gives type of tkinter object
for i, (key, val) in enumerate(kwargs.items()):
if 'rad' in key: # Make radiobutton w/ choices list
prow=tk.Frame(t)
[param, choices]=kwargs.get(key,[])
tk.Label(prow, text=param).pack(side=tk.LEFT)
tkvars.append(tk.StringVar()) # single common variable for chosen radiobutton
for j, val in enumerate(choices): # list of opts for radiobutton
tk.Radiobutton(prow, text=val, value=val, variable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side=tk.TOP)
elif 'chk' in key: # each dict val has param name, default bool val as 2 item list
prow=tk.Frame(t)
[param, val]=kwargs.get(key,['',''])
tkvars.append(tk.BooleanVar())
tkvars[i].set(val)
tk.Checkbutton(prow, text=param, variable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side=tk.TOP)
elif 'ent' in key:
prow=tk.Frame(t)
[param, val]=kwargs.get(key,[])
tk.Label(prow, text=param).pack(side=tk.LEFT)
tkvars.append(tk.StringVar())
tk.Entry(prow, textvariable=tkvars[i]).pack(side=tk.LEFT)
prow.pack(side=tk.TOP)
elif key=='command': # put command name at top?
topframe=tk.Frame(t)
tkvars.append(tk.StringVar()) # unused dummy
tk.Label(topframe, text=key).pack(side=tk.LEFT)
topframe.pack(side=tk.TOP)
# Row for abort & run buttons
prow=tk.Frame(t)
tk.Button(prow, text='Abort', command=abort).pack(side=tk.LEFT)
mystr='Run '+kwargs.get('command','')
tk.Button(prow, text=mystr, command=runcmd).pack(side=tk.LEFT)
prow.pack(side=tk.TOP)
class NavMapToolbar(NavigationToolbar2TkAgg):
''' Custom matplotlib toolbar w/ lasso pt remover and point picker
    parent is Pixarrviewer
'''
def __init__(self, canvas, root, parent):
self.canvas = canvas
self.root = root
self.parent = parent # plotter is Pixarrviewer
self.ax= self.parent.ax # axes needed for interaction
self.xys = None # for xy vals later associated with plot
self.selected = None # Holding area for selected indices
# Generic mpl toolbar using tkagg (with standard buttons)
NavigationToolbar2TkAgg.__init__(self, canvas, root)
        # Create lasso button and link it to startlasso
self.lasso_button= tk.Button(master=self, text='Select region', padx=2, pady=2, command=self.startlasso)
self.lasso_button.pack(side=tk.LEFT,fill="y")
self.showgrid_button= tk.Button(master=self, text='Show/hide grid', padx=2, pady=2,
command=self.parent.toggle_grid) # show/hide toggle in pixviewer
self.showgrid_button.pack(side=tk.LEFT,fill="y")
def startlasso(self):
''' Activated by lasso menu bar button on click; disconnects prior IDs, prep for lasso button press
'''
self.cid = self.canvas.mpl_connect('button_press_event', self.onpresslasso)
def onpresslasso(self, event):
''' Create lasso when button pressed on active canvas/axes '''
# ensure that current dataset is active
self.xys = self.parent.xys # pixels list passed from plotter (parent)
self.lasso = Lasso(event.inaxes, (event.xdata, event.ydata), self.callbacklasso)
# self.canvas.widgetlock(self.lasso) # skip... gives ValueError: already locked
def callbacklasso(self, verts):
print('Verts length is', len(verts))
# Verts is a list of x,y coords describing drawn path
p = path.Path(verts)
# true/false array
ind = p.contains_points(self.xys)
self.selected=[i for i in range(0, len(self.xys)) if ind[i]==True]
self.canvas.draw_idle()
# self.canvas.widgetlock.release(self.lasso) # valueerror you don't own this lock
del self.lasso
self.canvas.mpl_disconnect(self.cid) # disconnect lasso tool
def extract_spectrum(self):
        ''' Map only lassoed (or circular) regions: take single pixel or lassoed pixels, generate extracted spectrum '''
# Use method in GUIrois (which in turn calls QMfile method)
self.parent.parent.opts.map_custom(self.selected)
# TODO selected is probably sequential index # so convert to X,Y list
class Pixarrviewer():
'''Window for display of image and overlaid pixarray
lasso to set active/ inactive pixels
pixviewer can handle image --load, save w/ overlay, save/crop w/ overlay
'''
def __init__(self,root, parent):
self.root = root
self.parent = parent
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.canvas = FigureCanvasTkAgg(self.figure, self.root)
self.xys = None # used by lasso
self.image = None # base 512x512 image for superimposition of array
self.directory = None
self.uniquename = None
self.gridbool = False # show/hide for overlaid array/grid
# TODO is grid necessary or just pass and modify full pixarray?
self.gridlist = [] # visual representation of current pixarray
# xys used for lasso selection
self.xys=[[i,j] for i in range(0,512) for j in range(0,512)]
# Custom navselecttoolbar w/ interactive buttons
self.toolbar = NavMapToolbar(self.canvas, self.root,self)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
def pass_pixarray(self, pixarr):
''' Pass created pix array to viewer .. make grid for ImageDraw overlay '''
self.gridlist=[]
# pixarray already includes built in margin
for index, row in pixarr.iterrows():
self.gridlist.append([row.X1,row.Y1,row.X2,row.Y2])
def show_image(self):
''' Reload of base image (and optional grid overlay) into pixarrviewer window '''
try:
self.canvas.get_tk_widget().destroy() # destroy previous plot
self.toolbar.destroy()
except:
pass
print('Starting show_image.')
self.figure = mpl.figure.Figure(figsize=PLOT_SIZE, dpi=100)
self.figure.subplots_adjust(bottom=0.15,right=0.95,top=0.95)
self.ax = self.figure.add_subplot(111)
self.ax.imshow(self.image)
if self.gridbool:
draw=ImageDraw.Draw(self.image)
for i,[x1,y1,x2,y2] in enumerate(self.gridlist):
draw.rectangle((x1,y1,x2,y2), outline='red')
self.canvas = FigureCanvasTkAgg(self.figure, self.root)
self.toolbar = NavMapToolbar(self.canvas, self.root,self)
self.toolbar.update()
self.plot_widget = self.canvas.get_tk_widget()
self.plot_widget.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.canvas.show()
print('Show image completed.')
def load_image(self, fullpath):
''' Load and resize base image'''
image=Image.open(fullpath)
if image.size[0]!=512:
image=image.resize((512,512), resample=Image.NEAREST)
(directory, filename)=os.path.split(fullpath)
self.directory = directory
self.uniquename = ".".join(filename.split('.')[0:-1])
self.image= image
self.show_image()
def mod_grid(self):
''' Modify grid on/off pixel regions based on lasso, rect selector
passed back to pixarropts before save of pixarray '''
# TODO finish lasso or rectangle selector mods of grid
pass
def toggle_grid(self):
''' Toggle visibility of overlaid QM pixarray grid '''
if self.gridbool:
self.gridbool=False
else:
self.gridbool=True
self.show_image() # clear and reload main view
def runcmd(self, **pkwargs):
''' Menu/main lauched with pop up arguments entered in toplevel
passed via pkwargs... can define multiple '''
command=pkwargs.get('command')
if command=='save_image':
self.save_image(pkwargs)
def save_image(self, pkwargs):
''' Various types of image saves
imtype: fullfield or crop (subtract margins)
overlay: outline, grid or none '''
print('pkwargs are:', pkwargs)
thisim=self.image
imtype=pkwargs.get('imtype','full field')
overlay=pkwargs.get('overlay','none')
fname=pkwargs.get('filename','none')
if overlay=='grid':
draw=ImageDraw.Draw(thisim)
for i,[x1,y1,x2,y2] in enumerate(self.gridlist):
draw.rectangle((x1,y1,x2,y2), outline='red')
elif overlay=='outline':
# find min x1, max x2, min y1, max y2
minx1=min([i[0] for i in self.gridlist])
maxx2=max([i[2] for i in self.gridlist])
miny1=min([i[1] for i in self.gridlist])
maxy2=max([i[3] for i in self.gridlist])
# single rectangular outline of quant mapped area
draw=ImageDraw.Draw(thisim)
draw.rectangle((minx1,miny1,maxx2,maxy2), outline='red')
# Perform crop at end
if imtype=='crop':
minx1=min([i[0] for i in self.gridlist])
maxx2=max([i[2] for i in self.gridlist])
miny1=min([i[1] for i in self.gridlist])
maxy2=max([i[3] for i in self.gridlist])
# perform crop to quant mapped region
thisim.crop([minx1, miny1, maxx2, maxy2])
imname=self.directory+'/'+fname
thisim.save(imname)
class Pixarropts():
    ''' Parent is GUImain; manages the pixarray parameters displayed in pixarrviewer,
    handles on/off modifications of mapped pixels,
    and saves the pixarray file, Autotool, and spatial area files
'''
def __init__(self,root,parent):
self.root = root
self.parent = parent
# Lots of tk variables
self.arrsize=tk.IntVar() # Number of pixels for QM scan (usually differs from 512 pix of imaging)
self.arrsize.set(100)
self.margin=tk.DoubleVar()
self.margin.set(0.2)
self.regint=tk.IntVar()
self.regint.set(2)
self.atbool=tk.BooleanVar() # make new Autotool file
self.atbool.set(False)
self.areabool=tk.BooleanVar() # make new spatial area files
self.areabool.set(False)
self.scramblebool=tk.BooleanVar()
self.scramblebool.set(False)
self.basename=tk.StringVar()
self.basename.set('100x100array20marg')
self.pixarrname=tk.StringVar()
self.pixarrname.set('samplename')
# Display/ entry of quantmap pixarray parameters
self.button_frame = tk.Frame(self.root, pady=10)
self.button_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.pixarr_frame = tk.Frame(self.root, pady=10)
self.pixarr_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
# Display/entry of multiplex scan parameters
self.multiplex_frame = tk.Frame(self.root, pady=10)
self.multiplex_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.pixarr= None # created by makesquarearray
self.show_pixarrvars()
# permanent buttons in button_frame
rowframe=tk.Frame(self.button_frame)
tk.Button(rowframe, text='Make QM pixarray',
command=self.makesquarearray).pack(side=tk.LEFT,fill=tk.X,expand=1)
rowframe.pack(fill=tk.X, expand=1)
def show_pixarrvars(self):
''' Set up all the tkvars into pixarr_frame '''
rownum=0
tk.Label(self.pixarr_frame, text='Array Size (~100 x 100 max)').grid(row=rownum, column=1)
tk.Entry(self.pixarr_frame, textvariable=self.arrsize).grid(row=rownum, column=0)
rownum+=1
tk.Label(self.pixarr_frame, text='Unmapped margin (fraction)').grid(row=rownum, column=1)
tk.Entry(self.pixarr_frame, textvariable=self.margin).grid(row=rownum, column=0)
rownum+=1
tk.Label(self.pixarr_frame, text='Image reg interval (Autotool)').grid(row=rownum, column=1)
tk.Entry(self.pixarr_frame, textvariable=self.regint).grid(row=rownum, column=0)
rownum+=1
tk.Label(self.pixarr_frame, text='Pix array name').grid(row=rownum, column=1)
tk.Entry(self.pixarr_frame, textvariable=self.pixarrname).grid(row=rownum, column=0)
rownum+=1
tk.Label(self.pixarr_frame, text='Basename Autotool & spatial areas').grid(row=rownum, column=1)
tk.Entry(self.pixarr_frame, textvariable=self.basename).grid(row=rownum, column=0)
rownum+=1
tk.Checkbutton(self.pixarr_frame, variable=self.atbool, text='Make new Autotool file?').grid(row=rownum, column=0)
tk.Checkbutton(self.pixarr_frame, variable=self.areabool, text='Create new spatial area files?').grid(row=rownum, column=1)
rownum+=1
tk.Checkbutton(self.pixarr_frame, variable=self.scramblebool, text='hop beam to minimize charging?').grid(row=rownum, column=0)
rownum+=1
tk.Label(self.pixarr_frame, text='If same parameters, existing Autotool and spatial area files can be reused.').grid(row=rownum, column=0, columnspan=3)
rownum+=1
def makesquarearray(self):
''' Divide up 512x512 pixels in map into n areas and format correctly for spatial areas phi files
(which are loaded using Autotool loops into PHI Smartsoft);
        Mapping proceeds horizontally (x horizontal, y vertical)
ARGS:
arraysize -- # of pixels in one direction
margin - % of image field that is unmapped (20% means 10% of 512 field at both edges (aka 51 pixels))
is unmapped
basename - basename for area definition files
e.g. "50x50array20m" basemname makes files 50x50array20m1, 50x50array20m2, etc.
KWARGS: 'regint' - interval at which to build in image registration into autotool loop; val of 1 means
register every 20 pixels (since each area file holds 20 defined spatial areas); passed to makeautotool
        * this is the best way to incorporate image registration in the quantmap process... a more flexible
        interval is allowed; if instead one builds image reg into the multiplex itself, one has to
        run image registration much more frequently, which unnecessarily increases acquisition time
'writeareas' (bool) - write new spatial area definition files (old ones can be reused if same arraysize
and margin)
'writeAutotool' -- write of new Autotool sequence (can be reused if same arraysize/regint)
scrambled
'''
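        # Worked example (illustrative numbers only): with the defaults
        # arraysize=100 and margin=0.2, startxy = int(512*0.2/2) = 51 and
        # width = 512*0.8/100 ~ 4.1, so scan pixel (0,0) covers roughly
        # x,y = 51..55 of the 512x512 imaging field.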
print('Making square array')
pix=512 # default value for Auger instrument
arraysize=self.arrsize.get()
width=(pix*(1-self.margin.get())/arraysize) # width/height of scan pixel in terms of 512x512 field
startxy=int(pix*self.margin.get()/2) # split margin between top/bottom, left/right
mycols=['Xindex','Yindex','Areanumber','PHIname','Subnumber','X1','Y1','X2','Y2', 'Width', 'Height']
dim=arraysize**2
# square is the pixarray file correlating 101.spe with associated pixel in quantmap
self.pixarr=pd.DataFrame(index=np.arange(0,dim), columns=mycols)
# x is horizontal axis and mapping proceeds by going across top row
for index,row in self.pixarr.iterrows():
            xindex=index//arraysize # integer division gives row (0th is top row)
            yindex=index%arraysize # remainder gives column (0th is left column)
            # use .loc[row, column] rather than chained indexing so the
            # assignments reliably write back into the dataframe
            self.pixarr.loc[index,'Xindex']=xindex
            self.pixarr.loc[index,'Yindex']=yindex
            left=int(width*yindex+startxy) # left-right position depends on column
            self.pixarr.loc[index,'X1']=left
            right=int(width*yindex+startxy+width)
            self.pixarr.loc[index,'X2']=right
            top=int(width*xindex+startxy)
            self.pixarr.loc[index,'Y1']=top # top-bottom position depends on row
            bottom=int(width*xindex+startxy+width)
            self.pixarr.loc[index,'Y2']=bottom
            self.pixarr.loc[index,'Width']=right-left # variations due to rounding error
            self.pixarr.loc[index,'Height']=bottom-top
            # true area number describing pix position after file combination
            self.pixarr.loc[index,'Areanumber']=index+1
            # max of 20 areas allowed per spatial area .phi file
            self.pixarr.loc[index,'Subnumber']=index%20 # Before combination w/ 20 areas per file
            filenum=index//20+1 # filenumber of multiplex
            self.pixarr.loc[index,'PHIname']=self.basename.get()+str(filenum)
filelist=self.pixarr.PHIname.unique()
filelist=np.ndarray.tolist(filelist)
# map/move beam non-sequentially to minimize local charging
if self.scramblebool.get():
areanums=np.ndarray.tolist(self.pixarr.Areanumber.unique())
areanums=np.random.permutation(areanums)
self.pixarr['Areanumber']=pd.Series(areanums)
self.pixarr=self.pixarr.sort_values(['Areanumber'])
# need to reassign subnumber and PHIname
self.pixarr=self.pixarr.reset_index(drop=True)
for index,row in self.pixarr.iterrows():
self.pixarr=self.pixarr.set_value(index,'Subnumber', index%20)
self.pixarr=self.pixarr.set_value(index,'PHIname', self.basename.get()+str(index//20+1))
self.parent.pixviewer.pass_pixarray(self.pixarr)
print('Square array created.')
def save_pixarray(self):
        '''Menu/main launched save of pixarray file (after linking with
underlying data files '''
if self.pixarr is None:
return
filelist=self.pixarr.PHIname.unique()
filelist=np.ndarray.tolist(filelist)
if self.areabool.get():
for i, fname in enumerate(filelist):
thisfile=self.pixarr[self.pixarr['PHIname']==fname]
self.writeAESareas(thisfile, fname) # writes each list of 20 areas to separate .phi text file
print('New spatial area files saved.. e.g.', fname)
if self.atbool.get(): # option write to new Autotool file
atframe=self.makeautotool(filelist)
ATname='AT'+self.basename.get()+'.phi'
self.writeautotool(atframe, ATname)
print('Saving new autotool file', ATname)
# Instead of C:\Temp copy multiplex and spatial areas files to Smartsoft settings folders
mydir=self.parent.pixviewer.directory # get directory from pixviewer window
        if mydir is None: # in case image hasn't been loaded
            mydir=filedialog.askdirectory(title='Find data directory')
fname=mydir+'/'+self.pixarrname.get()+'_pixarr.csv'
self.pixarr.to_csv(fname, index=False)
print('Pixarray file saved', fname)
def writeAESareas(self, df, PHIname):
        ''' Optional write of spatial area definitions to a .phi spatial areas file in chunks of 20 areas
        (the per-file maximum); some weird encoding so just read and modify an existing .phi file '''
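        # The generated text is the flat PHI key=value layout assembled below,
        # e.g. (illustrative values):
        #   [SpatialArea]
        #   Area Count=20
        #   Area Active 0=True
        #   Area Mode 0=Area
        #   Area Left 0=51
        #   ...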
datastr=''
datastr+='[SpatialArea]\nArea Count='
datastr+=str(len(df)) # number of areas
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Active '
datastr+='%d' % i
datastr+='=True\n'
for i in range(0,len(df)):
datastr+='Area Mode '
datastr+='%d' % i
datastr+='=Area\n'
for i in range(0,len(df)):
datastr+='Area Left '
datastr+='%d' % i
datastr+='='
val=df.iloc[i]['X1']
datastr+='%d' % val
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Top '
datastr+='%d' % i
datastr+='='
val=df.iloc[i]['Y1']
datastr+='%d' % val
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Right '
datastr+='%d' % i
datastr+='='
val=df.iloc[i]['X2']
datastr+='%d' % val
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Bottom '
datastr+='%d' % i
datastr+='='
val=df.iloc[i]['Y2']
datastr+='%d' % val
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Width '
datastr+='%d' % i
datastr+='='
val=df.iloc[i]['Width']
datastr+='%d' % val
datastr+='\n'
for i in range(0,len(df)):
datastr+='Area Height '
datastr+='%d' % i
val=df.iloc[i]['Height']
datastr+='='
datastr+='%d' % val
datastr+='\n'
# Write this chunk of files to .phi spatial areas file (done w/ file replace method since encoding is weird unknown type)
filename=PHIname+'.phi'
try:
shutil.copyfile(AREAFILE, filename)
except:
mydir=filedialog.askdirectory(title='Select directory with spatial area file example.')
shutil.copyfile(mydir+'/'+'spatial_areas_sample_min.phi', filename)
for line in fileinput.input(filename, inplace=1):
sys.stdout.write(datastr)
def makeautotool(self, filelist, multacq='QM_multiplex.phi'):
'''Generates df with Autotool commands and data values (for generating Autotool phi file)
7/10 Modified with image reg insertion
spatial area files have to be in Auger's default directory for them..
attempting load from elsewhere (i.e. from C:\Temp) doesn't seem to work
kwarg: regint - interval for insertion of image registrations
multiplex breaks -- used in shifted situation (series of files at different shifts
multacq -- name of multiplex file to load... not currently implemented
'''
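        # The assembled sequence (as built below) is, in order:
        #   AES:Register Image, SEM:Photo,
        #   then for each spatial area file: AES:Load Area Define Setting...,
        #   AES:Multiplex Acquire,
        #   and finally SEM:Photo and SEM:SEM Load By Ref... (beam deflection)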
mycols=['Command','Data']
atframe=pd.DataFrame(columns=mycols)
atframe['Command']=['AES:Register Image','SEM:Photo']
# drop load of first multiplex ... user can just load it before start
''' TODO multiplex break not yet implemented
allows load of different multiplex file in middle of scans (in cases of
spatially varying charging across mapped region)
# CODE SECTION
if 'multibreaks' in kwargs:
multibreaks=kwargs.get('multibreaks',[])
multinames=kwargs.get('multinames',[])
multacq=multinames[0] # set to first shifted multiplex to load
if 0 in multibreaks: # first multiplex file loaded w/ multacq
multibreaks.remove(0)
'''
# Add first multiplex load (rest are below)
''' # just load multiplex manually (unless multibreaks is implemented)
newrow=pd.DataFrame(index=np.arange(0,1), columns=mycols)
newrow=newrow.set_value(0,'Command','AES:Load Multiplex Setting...')
newrow=newrow.set_value(0,'Data', multacq)
atframe=pd.concat([atframe,newrow], ignore_index=True)
'''
for i, file in enumerate(filelist):
newrow=pd.DataFrame(index=np.arange(0,2), columns=mycols)
newrow=newrow.set_value(0,'Command','AES:Load Area Define Setting...')
newrow=newrow.set_value(0,'Data', file)
newrow=newrow.set_value(1,'Command','AES:Multiplex Acquire')
atframe=pd.concat([atframe,newrow], ignore_index=True)
''' TODO Multiplex breaks section
# Now add load of next shifted multiplex file
if 'multibreaks' in kwargs:
if i in multibreaks:
lindex=multibreaks.index(i)
newrow=pd.DataFrame(index=np.arange(0,1), columns=mycols)
newrow=newrow.set_value(0,'Command','AES:Load Multiplex Setting...')
# multfile must be in settings/multiplex acquire (no file extensions in Autotool data cols)
newrow=newrow.set_value(0,'Data', multinames[lindex+1].replace('.phi',''))
atframe=pd.concat([atframe,newrow], ignore_index=True)
'''
# Tag on ending to autotool file
newrow=pd.DataFrame(index=np.arange(0,1), columns=mycols)
newrow['Command']=['SEM:Photo']
atframe=pd.concat([atframe,newrow], ignore_index=True) # ending SEM photo
newrow=pd.DataFrame(index=np.arange(0,1), columns=mycols)
newrow['Command']=['SEM:SEM Load By Ref...']
newrow['Data']=[BEAMDEFLECT]
atframe=pd.concat([atframe,newrow], ignore_index=True) # add final beam deflection
return atframe
def writeautotool(self, atframe, atname):
''' Write of standard autotool loop for quantmap
atframe created by makeautotool... name is passed
weird encoding so just read and modify existing pff file '''
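        # Output is the flat PHI key=value layout assembled below,
        # e.g. (illustrative values):
        #   [AutoTool]
        #   Task Count=6
        #   Task 0=AES:Register Image
        #   ...
        #   Data Count=6
        #   Data 0=...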
datastr=''
datastr+='[AutoTool]\nTask Count='
datastr+=str(len(atframe)) #
datastr+='\n'
for index, row in atframe.iterrows():
datastr+='Task '
datastr+='%d' % index
command=atframe.loc[index]['Command']
datastr+='='
datastr+=command
datastr+='\n'
datastr+='Data Count='
datastr+=str(len(atframe)) #
datastr+='\n'
for index, row in atframe.iterrows():
datastr+='Data '
datastr+='%d' % index
datastr+='='
val=atframe.loc[index]['Data']
if str(val)!='nan':
datastr+=str(val) # could be int in some cases
datastr+='\n'
# Write this chunk of files to .phi spatial areas file (done w/ file replace method since encoding is weird unknown type)
shutil.copyfile(AREAFILE, atname)
for line in fileinput.input(atname, inplace=1):
sys.stdout.write(datastr)
def plot_maps(self):
''' display 2D arrays of various types in mapviewer '''
activeelems=[]
plotmaps=[]
title=''
for i, tkbool in enumerate(self.tkelems):
if tkbool.get():
if self.plottype.get()=='Shiftmap':
if self.QMfile.shiftmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
plotmaps.append(self.QMfile.shiftmaps[i])
title='Peak shift'
elif self.plottype.get()=='Amplmap':
if self.QMfile.amplmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
plotmaps.append(self.QMfile.amplmaps[i])
title='Peak amplitude'
elif self.plottype.get()=='Elemmap':
if self.QMfile.amplmaps[i] is not None:
activeelems.append(self.QMfile.elements[i])
plotmaps.append(self.QMfile.elemmaps[i])
title='Element map'
print("Plotting ", self.plottype.get(), "for elements", ".".join(activeelems))
self.parent.mapviewer.replot_maps(activeelems, plotmaps, title)
class Multiplexopts():
    ''' Parent is GUImain; manages multiplex elements and other parameters
    for creation of the QM multiplex file;
    also includes acquisition time estimate updates for display;
    based on existing QMmultiplex_setup (spyder-launched tk version)
    '''
def __init__(self, root, parent):
self.root = root
self.parent = parent
# Load AESquantparams
self.aesquantparams = pd.DataFrame()
self.loadAESquantparams()
self.elements=[]
self.elemparams=[] # standard QM vals from aesquantparams
# common multiplex scan parameters
self.dwell=tk.IntVar()
self.dwell.set(20)
self.numcycles=tk.IntVar()
self.numcycles.set(3)
self.peakshift=tk.IntVar() # globally appliable peak shift (and no local shift option)
self.peakshift.set(0)
self.peakwidth=tk.IntVar() # globally appliable peak shift
self.peakwidth.set(7)
# self.regint=tk.IntVar() # image registration interval (if done w/in multiplex)
# self.regint.set(0)
self.phiname=tk.StringVar()
self.phiname.set('QMmultiplex.phi')
self.timeest=tk.DoubleVar()
# lists of tkvars with multiplex parameters
self.peaks=[]
self.sweeps=[]
self.widths=[]
self.lowers=[] # lower ev of scan range
self.uppers=[]
# Shows all scan regions within multiplex
self.top_frame= tk.Frame(self.root)
# show/alter common multiplex parameters
self.param_frame = tk.Frame(self.top_frame, pady=10)
self.param_frame.pack(side=tk.LEFT,fill=tk.X,expand=1)
# button frame below global params
self.button_frame = tk.Frame(self.top_frame, pady=10)
self.button_frame.pack(side=tk.LEFT,fill=tk.X,expand=1)
self.top_frame.pack(side=tk.TOP,fill=tk.X,expand=1)
self.mult_frame = tk.Frame(self.root, pady=10)
self.mult_frame.pack(side=tk.BOTTOM,fill=tk.X,expand=1)
tk.Button(self.button_frame, text='Choose elements',
command=self.chooseelems).pack(side=tk.TOP,fill=tk.X,expand=1)
tk.Button(self.button_frame, text='Apply global shift/width',
command=self.applyglobals).pack(side=tk.TOP,fill=tk.X,expand=1)
# Add a button label
tk.Label(self.button_frame, text='Make local peak changes').pack(side=tk.TOP,fill=tk.X,expand=1)
        tk.Button(self.button_frame, text='Change ranges using widths',
                  command=self.updatepeaks).pack(side=tk.TOP,fill=tk.X,expand=1)
        # second button wires up updatewidths (label text assumed)
        tk.Button(self.button_frame, text='Change widths using ranges',
                  command=self.updatewidths).pack(side=tk.TOP,fill=tk.X,expand=1)
tk.Button(self.button_frame, text='Recalc/update',
command=self.recalcupdate).pack(side=tk.TOP,fill=tk.X,expand=1)
tk.Button(self.button_frame, text='Save multiplex',
command=self.save_multiplex).pack(side=tk.TOP,fill=tk.X,expand=1)
self.display_multparams()
def save_multiplex(self):
''' Saves new multiplex to file '''
multdf=self.makemultdf()
# Save modified multiplex scan for QM
self.writemultiplex(multdf)
def makemultdf(self):
''' Reconstruct dataframe holding altered multiplex scan parameters
then feed to writemultiplex'''
mycols=['AtomNum', 'Elem', 'Active', 'Sweeps', 'EVstep', 'Lower', 'Upper',
'Range', 'Lowpeak', 'Peak', 'Hipeak', 'Back1', 'Back2']
multdf=pd.DataFrame(columns=mycols)
atnos=[]
regs=[]
peaks=self.convert_tklist(self.peaks)
sweeps=self.convert_tklist(self.sweeps)
widths=self.convert_tklist(self.widths)
lowers=self.convert_tklist(self.lowers)
uppers=self.convert_tklist(self.uppers)
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
atnos.append(atno)
atnos.append(atno)
atnos.append(atno)
regs.append(elem+'L')
regs.append(elem)
regs.append(elem+'H')
multdf['AtomNum']=atnos
multdf['Elem']=regs
multdf['Active']='Y'
        multdf['Sweeps']=sweeps
multdf['EVstep']=1.0
multdf['Peak']=peaks
multdf['Lower']=lowers
multdf['Upper']=uppers
multdf['Lowpeak']=multdf['Lower']+2
multdf['Hipeak']=multdf['Upper']-2
multdf['Back1']=multdf['Lower']
multdf['Back2']=multdf['Upper']
# convert half-widths to full widths
widths=[2*i+1 for i in widths]
multdf['Range']=widths
# Eliminate any overlaps in scan range (remove from L or H not from main peak)
# overlaps should already be gone b/c of tk gui methods
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
            # each element occupies three consecutive rows (L, peak, H), so index by 3*i
            lowend=multdf.iloc[3*i]['Upper']
            mainstart=multdf.iloc[3*i+1]['Lower']
            mainend=multdf.iloc[3*i+1]['Upper']
            highstart=multdf.iloc[3*i+2]['Lower']
            # print(str(lowend), str(mainstart), str(mainend),str(highstart))
            if mainstart<lowend:
                multdf=multdf.set_value(multdf.index[3*i],'Upper', mainstart-1)
            if highstart<mainend:
                multdf=multdf.set_value(multdf.index[3*i+2],'Lower', mainend+1)
return multdf
def writemultiplex(self, multdf):
''' Write of multiplex settings file (max 20 regions) after interactive param setting
Some weird encoding so just read and modify existing pff file
image registration choices are cycles or areas
kwargs:
regmode - Areas or (if not present, image registration done using autotool not in multiplex)
reginterval - 2 (or whatever)'''
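        # The file assembled below is a flat key=value listing, e.g.
        # (illustrative values):
        #   [MultiplexAcq]
        #   Register Image=False
        #   Time Per Step (ms)=20.0
        #   Number of Regions=9
        # followed by per-region lists such as 'Element List 0=SiL',
        # 'Sweeps 0=1', 'Lower Acq 0=...', etc.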
phiname=self.phiname.get()
dwell=self.dwell.get()
numcycles=self.numcycles.get()
datastr='' # long data string for multiplex file
datastr+='[MultiplexAcq]\n'
''' image registration option within multiplex (better in autotool)
# (also can be done in autotool at lower frequencies)
if int(self.regint.get())>0: # ensure this is integer
regmode='Areas'
reginterval=int(self.regint.get())
datastr+='Register Image=True\nImage Registration Interval='
datastr+=str(reginterval)
datastr+='\n'
datastr+='Image Registration Mode='
datastr+=regmode
else:
'''
datastr+='Register Image=False'
# likely can skip other params if this is false
datastr+='\nTime Per Step (ms)='
datastr+='%0.1f' % dwell
datastr+='\nNegative Values=Allow\nNumber of Regions='
datastr+=str(len(multdf)) # number of multiplex regions
datastr+='\nAtomic Number List Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Atomic Number List '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['AtomNum']
datastr+='%d' % val
datastr+='\n'
datastr+='Element List Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Element List '
datastr+='%d' % i
datastr+='='
strval=multdf.iloc[i]['Elem']
datastr+=strval
datastr+='\n'
datastr+='Active Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Active '
datastr+='%d' % i
datastr+='=True\n' # won't be present in Df if not active
datastr+='Sweeps Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Sweeps '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Sweeps']
datastr+='%d' % val
datastr+='\n'
datastr+='Lower Acq Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Lower Acq '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Lower']
datastr+='%0.1f' % val # float with tenths place precision
datastr+='\n'
datastr+='Upper Acq Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Upper Acq '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Upper']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Acq Range Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Acq Range '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Range']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Lower Analysis Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Lower Analysis '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Lowpeak']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Upper Analysis Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Upper Analysis '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Hipeak']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='eV Per Step Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='eV Per Step '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['EVstep']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Peak Energy Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Peak Energy '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Peak']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Background 1 Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Background 1 '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Back1']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Background 2 Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Background 2 '
datastr+='%d' % i
datastr+='='
val=multdf.iloc[i]['Back2']
datastr+='%0.1f' % val
datastr+='\n'
datastr+='Number Of Channels Count='
datastr+=str(len(multdf))
datastr+='\n'
for i in range(0,len(multdf)):
datastr+='Number Of Channels '
datastr+='%d' % i
datastr+='=1 thru 8\n'
datastr+='Number of Cycles='
datastr+=str(numcycles)
datastr+='\n'
# Write this chunk of files to .phi spatial areas file (done w/ file replace method since encoding is weird unknown type)
try:
shutil.copyfile(AREAFILE,phiname)
except:
mydir=filedialog.askdirectory(title='Select directory with spatial area file example.')
shutil.copyfile(mydir+'/'+'spatial_areas_sample_min.phi',phiname)
for line in fileinput.input(phiname, inplace=1):
sys.stdout.write(datastr)
print(phiname,' saved to current working directory')
def convert_tklist(self, tklist):
''' convert list of tk inter variables to normal python list '''
newlist=[]
for i, val in enumerate(tklist):
newlist.append(val.get())
return newlist
def display_multparams(self):
''' Display of global params for multiplex scan '''
# Initialize w/ shift of zero
# elem name, lower region center, peak center, upper reg center, # sweeps
shift=0
self.elemparams=self.getQMparams(shift)
# Display of array size and other params
tk.Label(self.param_frame, text='Dwell time (ms)').grid(row=0, column=6)
tk.OptionMenu(self.param_frame, self.dwell, 5, 10, 20) # drop-down
tk.Entry(self.param_frame, textvariable=self.dwell).grid(row=0, column=7)
tk.Label(self.param_frame, text='# of cycles').grid(row=1, column=6)
tk.Entry(self.param_frame, textvariable=self.numcycles).grid(row=1, column=7)
tk.Label(self.param_frame, text='Peak shift').grid(row=2, column=6)
tk.Entry(self.param_frame, textvariable=self.peakshift).grid(row=2, column=7)
tk.Label(self.param_frame, text='Half width').grid(row=3, column=6)
tk.Entry(self.param_frame, textvariable=self.peakwidth).grid(row=3, column=7)
tk.Label(self.param_frame, text='Multiplex name').grid(row=4, column=6)
tk.Entry(self.param_frame, textvariable=self.phiname).grid(row=4, column=7)
tk.Label(self.param_frame, text='Time (hrs)').grid(row=5, column=6)
tk.Entry(self.param_frame, textvariable=self.timeest).grid(row=5, column=7)
# tk var not needed for peak/ element regions ... just display
def init_peaks(self):
''' all tkvars for multiplex scan params '''
print('initializing peak parameters for elements',",".join(self.elements))
# clear all existing
self.peaks=[]
self.sweeps=[]
self.widths=[]
self.lowers=[] # lower ev of scan range
self.uppers=[]
# Create lists of tk string variables
for i in range(0,3*len(self.elements)):
self.peaks.append(tk.DoubleVar()) # center of scan window
self.lowers.append(tk.DoubleVar()) # lower limit of scan window
self.uppers.append(tk.DoubleVar())
self.sweeps.append(tk.IntVar())
self.widths.append(tk.IntVar())
# initialize based on elemparams
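        # each element gets three consecutive entries: low background (3*i),
        # the peak itself (3*i+1) and high background (3*i+2)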
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
self.peaks[3*i].set(int(self.elemparams[i][1])) # lower window for element
self.lowers[3*i].set(int(self.elemparams[i][1]-7))
self.uppers[3*i].set(int(self.elemparams[i][1]+7))
self.widths[3*i].set(7)
self.sweeps[3*i].set(1)
self.peaks[3*i+1].set(int(self.elemparams[i][2])) # peak itself
self.lowers[3*i+1].set(int(self.elemparams[i][2]-7))
self.uppers[3*i+1].set(int(self.elemparams[i][2]+7))
self.widths[3*i+1].set(7)
self.sweeps[3*i+1].set(int(self.elemparams[i][4]))
self.peaks[3*i+2].set(int(self.elemparams[i][3])) # upper window for element
self.lowers[3*i+2].set(int(self.elemparams[i][3]-7))
self.uppers[3*i+2].set(int(self.elemparams[i][3]+7))
self.widths[3*i+2].set(7)
self.sweeps[3*i+2].set(1)
def display_peaks(self):
''' Display all multiplex peak scan params in mult_frame '''
print('display peaks called')
for child in self.mult_frame.winfo_children():
child.destroy()
# First col are energy region/element labels (not alterable)
tk.Label(self.mult_frame, text='Region').grid(row=0, column=0)
tk.Label(self.mult_frame, text='Peak').grid(row=0, column=1)
tk.Label(self.mult_frame, text='Lower').grid(row=0, column=2)
tk.Label(self.mult_frame, text='Upper').grid(row=0, column=3)
tk.Label(self.mult_frame, text='Sweeps').grid(row=0, column=4)
tk.Label(self.mult_frame, text='Half-width').grid(row=0, column=5)
# Display peak params (centers/ lower ev/ upper ev/ width/
rownum=1 # place peaks after column header row
print('Elements are',",".join(self.elements))
for i, elem in enumerate(self.elements):
# first list element regions (i.e. SiL, Si, SiH)
tempstr=elem+'L'
tk.Label(self.mult_frame, text=tempstr).grid(row=rownum, column=0)
tk.Entry(self.mult_frame, textvariable=self.peaks[3*i], width=7).grid(row=rownum, column=1)
tk.Entry(self.mult_frame, textvariable=self.lowers[3*i], width=7).grid(row=rownum, column=2)
tk.Entry(self.mult_frame, textvariable=self.uppers[3*i], width=7).grid(row=rownum, column=3)
tk.Entry(self.mult_frame, textvariable=self.sweeps[3*i], width=7).grid(row=rownum, column=4)
tk.Entry(self.mult_frame, textvariable=self.widths[3*i], width=7).grid(row=rownum, column=5)
rownum+=1
# now handle peak line itself
tk.Label(self.mult_frame, text=elem).grid(row=rownum, column=0)
tk.Entry(self.mult_frame, textvariable=self.peaks[3*i+1], width=7).grid(row=rownum, column=1)
tk.Entry(self.mult_frame, textvariable=self.lowers[3*i+1], width=7).grid(row=rownum, column=2)
tk.Entry(self.mult_frame, textvariable=self.uppers[3*i+1], width=7).grid(row=rownum, column=3)
tk.Entry(self.mult_frame, textvariable=self.sweeps[3*i+1], width=7).grid(row=rownum, column=4)
tk.Entry(self.mult_frame, textvariable=self.widths[3*i+1], width=7).grid(row=rownum, column=5)
rownum+=1
tempstr=elem+'H'
tk.Label(self.mult_frame, text=tempstr).grid(row=rownum, column=0)
tk.Entry(self.mult_frame, textvariable=self.peaks[3*i+2], width=7).grid(row=rownum, column=1)
tk.Entry(self.mult_frame, textvariable=self.lowers[3*i+2], width=7).grid(row=rownum, column=2)
tk.Entry(self.mult_frame, textvariable=self.uppers[3*i+2], width=7).grid(row=rownum, column=3)
tk.Entry(self.mult_frame, textvariable=self.sweeps[3*i+2], width=7).grid(row=rownum, column=4)
tk.Entry(self.mult_frame, textvariable=self.widths[3*i+2], width=7).grid(row=rownum, column=5)
rownum+=1
def getQMparams(self, shift):
''' retrieve direct peak location, low & high energy regions (background below and
above peak), and default # of sweeps for passed elements '''
self.elemparams=[]
for i, elem in enumerate(self.elements):
match=self.aesquantparams[self.aesquantparams['element']==elem]
if len(match)==1:
lowback=round(match.iloc[0]['QMlow']+shift,1)
hiback=round(match.iloc[0]['QMhigh']+shift,1)
# Direct peak location val is relative to negpeak
peak=round(match.iloc[0]['negpeak']+match.iloc[0]['integpeak']+shift,1)
# Default # of sweeps for actual peak
sweeps=int(match.iloc[0]['QMsweeps'])
atno=int(match.iloc[0]['atno']) # atomic number
self.elemparams.append([elem, lowback, peak, hiback, sweeps, atno])
else:
print("Couldn't find element", elem)
def recalc(self):
''' Update time estimates after any value changes (not tied to button) but called from
within every method
'''
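        # Time estimate as computed below:
        #   channels = numcycles * sweeps * (2*halfwidth + 1), summed over regions
        #   seconds per pixel = channels * dwell / 1000
        #   total hours = seconds per pixel * arraysize**2 / 3600
        # e.g. (illustrative) 3 cycles, 1 sweep, half-width 7 and 20 ms dwell over
        # 9 regions gives ~8.1 s per pixel, ~22.5 h for a 100x100 array.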
# Get array size from pixarropts
print(self.parent.pixopts.arrsize.get())
# Acquisition time est. in hours
numchannels=0
for i in range(0,len(self.widths)):
# 2 * half-width * # sweeps * num cycles is # of ev channels scanned
numchannels+=int(self.numcycles.get()) * int(self.sweeps[i].get()) * (int(self.widths[i].get())*2+1)
# time for single multiplex (per area)
singletime=numchannels*(self.dwell.get())/1000
numareas=self.parent.pixopts.arrsize.get()**2
fulltime= singletime*numareas/3600
tempstr = "%.2f" % fulltime
self.timeest.set(tempstr)
def updatewidth(self):
''' Use current lowers/uppers values to calculate each local width '''
# first reset centers/ranges to initial values
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
            # int() must wrap the whole half-width expression so an integer
            # (not a float) is stored in each IntVar
            if int((self.uppers[3*i].get() - self.lowers[3*i].get())/2) > 0:
                self.widths[3*i].set(int((self.uppers[3*i].get() - self.lowers[3*i].get())/2))
            else:
                self.widths[3*i].set(0)
            if int((self.uppers[3*i+1].get() - self.lowers[3*i+1].get())/2) > 0:
                self.widths[3*i+1].set(int((self.uppers[3*i+1].get() - self.lowers[3*i+1].get())/2))
            else:
                self.widths[3*i+1].set(0)
            if int((self.uppers[3*i+2].get() - self.lowers[3*i+2].get())/2) > 0:
                self.widths[3*i+2].set(int((self.uppers[3*i+2].get() - self.lowers[3*i+2].get())/2))
            else:
                self.widths[3*i+2].set(0)
def adjustpeakcenter(self):
''' Squeezing of background regions changes effective center '''
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
self.peaks[3*i].set((self.uppers[3*i].get() + self.lowers[3*i].get())/2) # lower window for element
self.peaks[3*i+2].set((self.uppers[3*i+2].get() + self.lowers[3*i+2].get())/2)
def checkoverlaps(self):
''' After any changes adjust lower and upper backgrounds if overlapping w/ main region '''
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
if self.uppers[3*i].get() >= self.lowers[3*i+1].get():
self.uppers[3*i].set(self.lowers[3*i+1].get() - 1)
if self.lowers[3*i+2].get() <= self.uppers[3*i+1].get():
self.lowers[3*i+2].set(self.uppers[3*i+1].get() + 1)
# TODO width of OL or OH cannot be negative (constraint in updatewidth?)
# Changing lowers/ uppers change resulting width
self.updatewidth()
# changing lowers uppers for lowback and highback regions can change peakcenter
self.adjustpeakcenter()
def applyglobals(self):
        ''' Update each peak's position and width based on the global peak shift
        and global width (button launched) '''
for i, elem in enumerate(self.elemparams):
self.peaks[3*i].set(int(self.elemparams[i][1]+self.peakshift.get()))
self.lowers[3*i].set(int(self.elemparams[i][1]-self.peakwidth.get() +self.peakshift.get()))
self.uppers[3*i].set(int(self.elemparams[i][1] + self.peakwidth.get() +self.peakshift.get()))
self.widths[3*i].set(self.peakwidth.get())
self.peaks[3*i+1].set(int(self.elemparams[i][2]+self.peakshift.get()))
self.lowers[3*i+1].set(int(self.elemparams[i][2]-self.peakwidth.get() +self.peakshift.get()))
self.uppers[3*i+1].set(int(self.elemparams[i][2] + self.peakwidth.get() +self.peakshift.get()))
self.widths[3*i+1].set(self.peakwidth.get())
self.peaks[3*i+2].set(int(self.elemparams[i][3]+self.peakshift.get()))
self.lowers[3*i+2].set(int(self.elemparams[i][3]-self.peakwidth.get() +self.peakshift.get()))
self.uppers[3*i+2].set(int(self.elemparams[i][3] + self.peakwidth.get() +self.peakshift.get()))
self.widths[3*i+2].set(self.peakwidth.get())
self.checkoverlaps()
self.recalc() # update time estimate
def updatepeaks(self):
''' Use local widths (and global shift) to update peak centers and ranges
no vals for local energy shift but could change peaks/lowers/uppers '''
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
self.peaks[3*i].set(int(self.elemparams[i][1] + self.peakshift.get() )) # lower window for element
self.lowers[3*i].set(int(self.elemparams[i][1]+ self.peakshift.get() - self.widths[3*i].get()))
self.uppers[3*i].set(int(self.elemparams[i][1]+ self.peakshift.get() + self.widths[3*i].get()))
self.peaks[3*i+1].set(int(self.elemparams[i][2] + self.peakshift.get() )) # peak itself
self.lowers[3*i+1].set(int(self.elemparams[i][2] + self.peakshift.get() - self.widths[3*i+1].get()))
self.uppers[3*i+1].set(int(self.elemparams[i][2] + self.peakshift.get() + self.widths[3*i+1].get()))
self.peaks[3*i+2].set(int(self.elemparams[i][3] + self.peakshift.get())) # upper window for element
self.lowers[3*i+2].set(int(self.elemparams[i][3]+ self.peakshift.get() - self.widths[3*i+2].get()))
self.uppers[3*i+2].set(int(self.elemparams[i][3]+ self.peakshift.get() + self.widths[3*i+2].get()))
self.checkoverlaps()
# acquisition time est. in hours
self.recalc()
def updatewidths(self):
''' Use current lowers/uppers values to calculate each local width '''
# first reset centers/ranges to initial values
for i, [elem, lowreg, peak, hireg, sweep, atno] in enumerate(self.elemparams):
            self.widths[3*i].set(int((self.uppers[3*i].get() - self.lowers[3*i].get())/2))
            self.widths[3*i+1].set(int((self.uppers[3*i+1].get() - self.lowers[3*i+1].get())/2))
            self.widths[3*i+2].set(int((self.uppers[3*i+2].get() - self.lowers[3*i+2].get())/2))
self.checkoverlaps()
# acquisition time est. in hours
self.recalc()
def recalcupdate(self):
''' Update time estimates, widths, adjust overlap boundaries
'''
self.checkoverlaps()
self.recalc()
def chooseelems(self):
''' Select elements using pop-up toplevel; all available peaks from AESquantparams.csv '''
tframe = tk.Toplevel(self.root)
# Subset of elements selected (on) by default
elemdict={'S':1,'C':1,'Ti':1,'O':1,'Fe1':1,'Fe2':1,'Na':1,'Mg':1,'Al':1,'Si':1,'Fe':1,'Ca':1}
preset1={'C':1,'O':1,'Si':1,'Fe':1,'Mg':1,'Ca':1}
preset2={'O':1,'Mg':1,'Si':1,'Fe':1}
# All available elements/peaks are those with entries in Aesquantparams.csv
elems=np.ndarray.tolist(self.aesquantparams.element.unique())
varlist=[] # list of tkinter IntVars
for i, col in enumerate(elems): # set up string variables
varlist.append(tk.IntVar())
val=elemdict.get(col,0) # set to 1 or 0 based on above default dictionary
varlist[i].set(val) # set default value based on elemdict
tk.Label(tframe, text='Select elements for plotting or quant').grid(row=0,column=0)
def clearall():
''' Set all tkinter vars to zero '''
for i, col in enumerate(elems): # set up string variables
varlist[i].set(0) # set default value based on elemdict
def choose1():
''' Have available preset defaults and adjust checkbox values '''
# preset1={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'Fe2':1}
# Still have to pass these through as tkinter ints
for i, col in enumerate(elems): # set up string variables
val=preset1.get(col,0) # set to 1 or 0 based on above default dictionary
varlist[i].set(val) # set default value based on elemdict
def choose2():
''' Have available preset defaults and adjust checkbox values '''
# preset2={'S':1,'Mg':1,'Si':1,'Fe':1,'Ca':1,'Fe2':1}
# Still have to pass these through as tkinter ints
for i, col in enumerate(elems): # set up string variables
val=preset2.get(col,0) # set to 1 or 0 based on above default dictionary
varlist[i].set(val) # set default value based on elemdict
def selectelems():
'''Choose checked elems and close popup '''
self.elements=[] # list of strings with plot number and x or y
for i, val in enumerate(varlist): # result in normal string, not tkinter StringVar
if val.get()==1: # this element is active
self.elements.append(elems[i]) # add element if box is checked
print('Selected elements are',",".join(self.elements))
self.getQMparams(int(self.peakshift.get())) # get QM params with current global peak shift
self.init_peaks()
self.display_peaks() # now show peaks with chosen elements
tframe.destroy()
for i, col in enumerate(elems):
# choose row, col grid position (starting row 1)
            thisrow=i%3+1 # three-row layout, filling column by column
thiscol=i//3
ent=tk.Checkbutton(tframe, text=elems[i], variable=varlist[i])
ent.grid(row=thisrow, column=thiscol)
# Add preset 1 button (defined above)
els=list(preset1)
mystr=', '.join(els)
c=tk.Button(tframe, text=mystr, command=choose1)
lastrow=len(elems)%3+2
c.grid(row=lastrow, column=0)
# Add preset 2 button
els=list(preset2)
mystr=', '.join(els)
d=tk.Button(tframe, text=mystr, command=choose2)
lastrow=len(elems)%3+3
d.grid(row=lastrow, column=0)
# clear all
e=tk.Button(tframe, text='Clear all', command=clearall)
lastrow=len(elems)%3+4
e.grid(row=lastrow, column=0)
g=tk.Button(tframe, text='Done', command=selectelems)
lastrow=len(elems)%3+7
g.grid(row=lastrow, column=0)
# Do further initialization once toplevel is destroyed
def loadAESquantparams(self):
''' Loads standard values of Auger quant parameters
TODO what about dealing with local shifts '''
# Checkbutton option for local (or standard) AESquantparams in file loader?
try:
self.aesquantparams=pd.read_csv(AESQUANTPARAMFILE, encoding='utf-8')
except:
dir=filedialog.askdirectory()
self.aesquantparams=pd.read_csv(dir+'/'+'aesquantparams.csv', encoding='utf-8')
print('AESquantparams loaded')
| mit |
mthz/rpg_svo | svo_analysis/src/svo_analysis/analyse_timing.py | 17 | 3476 | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
def analyse_timing(D, trace_dir):
# identify measurements which result from normal frames and which from keyframes
is_frame = np.argwhere(D['repr_n_mps'] >= 0)
n_frames = len(is_frame)
# set initial time to zero
D['timestamp'] = D['timestamp'] - D['timestamp'][0]
# ----------------------------------------------------------------------------
# plot total time for frame processing
    avg_time = np.mean(D['tot_time'][is_frame])*1000
fig = plt.figure(figsize=(8, 3))
ax = fig.add_subplot(111, ylabel='processing time [ms]', xlabel='time [s]')
ax.plot(D['timestamp'][is_frame], D['tot_time'][is_frame]*1000, 'g-', label='total time [ms]')
ax.plot(D['timestamp'][is_frame], np.ones(n_frames)*avg_time, 'b--', label=str('%(time).1fms mean time' % {'time': avg_time}))
ax.legend()
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot boxplot
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='Processing time [ms]')
ax.boxplot([
D['tot_time'][is_frame]*1000,
# D['t_local_ba'][is_kf]*1000,
D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000,
D['reproject'][is_frame]*1000,
D['sparse_img_align'][is_frame]*1000,
D['pyramid_creation'][is_frame]*1000
], 0,'', vert=0)
boxplot_labels = [
r'\textbf{Total Motion Estimation: %2.2fms}' % np.median(D['tot_time'][is_frame]*1000),
# 'Local BA (KF only): %.2fms ' % np.median(D['local_ba'][is_kf]*1000),
'Refinement: %2.2fms' % np.median(D['pose_optimizer'][is_frame]*1000 + D['point_optimizer'][is_frame]*1000),
'Feature Alignment: %2.2fms' % np.median(D['reproject'][is_frame]*1000),
'Sparse Image Alignment: %2.2fms' % np.median(D['sparse_img_align'][is_frame]*1000),
'Pyramid Creation: %2.2fms' % np.median(D['pyramid_creation'][is_frame]*1000) ]
ax.set_yticks(np.arange(len(boxplot_labels))+1)
ax.set_yticklabels(boxplot_labels)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing_boxplot.pdf'), bbox_inches="tight")
# ----------------------------------------------------------------------------
# plot boxplot reprojection
fig = plt.figure(figsize=(6,2))
ax = fig.add_subplot(111, xlabel='Processing time [ms]')
ax.boxplot([ D['reproject'][is_frame]*1000,
D['feature_align'][is_frame]*1000,
D['reproject_candidates'][is_frame]*1000,
D['reproject_kfs'][is_frame]*1000 ], 0, '', vert=0)
boxplot_labels = [r'\textbf{Total Reprojection: %2.2fms}' % np.median(D['reproject'][is_frame]*1000),
'Feature Alignment: %2.2fms' % np.median(D['feature_align'][is_frame]*1000),
'Reproject Candidates: %2.2fms' % np.median(D['reproject_candidates'][is_frame]*1000),
'Reproject Keyframes: %2.2fms' % np.median(D['reproject_kfs'][is_frame]*1000) ]
ax.set_yticks(np.arange(len(boxplot_labels))+1)
ax.set_yticklabels(boxplot_labels)
fig.tight_layout()
fig.savefig(os.path.join(trace_dir,'timing_reprojection.pdf'), bbox_inches="tight") | gpl-3.0 |
gnsiva/Amphitrite | gui/ContourGui/CtrPlotPanel.py | 1 | 21410 | """Class for plotting 3D data as heatmaps for ContourGui()."""
__author__ = "Ganesh N. Sivalingam <[email protected]"
import matplotlib
import matplotlib as mpl
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import matplotlib.pyplot as plt
import numpy as np
import wx,os
import msClasses.MassSpectrum as MassSpectrum
from lib import utils
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
from collections import OrderedDict
import matplotlib.gridspec as gridspec
from AmphitriteEnums import *
from imClasses import ChargeStatePeak
import matplotlib.font_manager
prop = matplotlib.font_manager.FontProperties(size='small')
class CtrPlotPanel():
"""
:parameter panel: wx panel to contain this plot
"""
def __init__(self,panel):
self.dpi = 80
self.fig = Figure((3.0, 2.0), dpi=self.dpi)
self.canvas = FigCanvas(panel, -1, self.fig)
self.gs = gridspec.GridSpec(1,1)
self.axes = [None]
self.toolbar = NavigationToolbar(self.canvas)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(10)
panel.SetSizer(self.vbox)
self.canvas.draw()
self.pickedValue = None
self.pickingActive = False
self.pickedAlready = False
self.tempAnnotation = None
self.settings = None
self.gui = None
self.lift = 30
self.picker = None
self.releaser = None
self.pickedValue = None
self.releasedValue = None
self.selectedAxis = None
self.atroposLeft = None
self.atroposRight = None
self.backgroundColour = 'white'
self.vmax = 100
self.cmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['white','b','k','k'],256)
self.cmap2 = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['white','r','k','k'],256)
self.differenceCmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['pink','r','r','white','b','b','c'],256)
#====================
# Adding other panel components
def setSettings(self,settings):
"""Set the CtrSettings() object.
:parameter settings: CtrSettings() object
"""
self.settings = settings
def setGui(self,gui):
"""Set the GUI object to enable accessing its attributes.
:parameter gui: ContourGui() object
"""
self.gui = gui
#====================
############################################################
# Axes stuff
############################################################
def _preparePlottingSections(self,rows,columns):
"""Set up the plot panel using GridSpec.
:parameter rows: Number of plot areas per row
:parameter columns: Number of plot areas per column
"""
self.fig.clf(keep_observers=True)
self.gs = gridspec.GridSpec(rows,columns)
def getSingleAxis(self):
"""Set up the plot panel with a single plot area and return it.
:returns: Matplotlib Axes object
"""
self._preparePlottingSections(1,1)
self.axes = [None]
self.axes[0] = self.fig.add_subplot(self.gs[0,0])
self.axes[0].plot([],[])
return self.axes[0]
def getDoubleAxes(self):
"""Set up the plot panel with two plot areas and return them.
:returns: List of two matplotlib Axes objects
"""
self._preparePlottingSections(3,1)
self.axes = [None,None]
self.axes[0] = self.fig.add_subplot(self.gs[0:2])
self.axes[1] = self.fig.add_subplot(self.gs[2])
return self.axes[0],self.axes[1]
# Axes stuff
############################################################
def setVmax(self,vmax):
"""The value used for the maximum color saturation value.
"""
if utils.isNumber(vmax):
self.vmax = float(vmax)
################################################################
################################################################
# Extraction limits functions
################################################################
# Mass spectra (Extraction limits)
def plotMsExtractionLimits(self,ax,limitsD=None):
"""Fill in the area to be extracted from the m/z axis
as arrival time or CCS.
:parameter ax: Matplotlib Axes object to use
:parameter limitsD: Set to True to use automatic maximum width size
for extraction
"""
if self.gui.checkboxAutoLimits.IsChecked():
self.plotAtroposSelectionAutoWidth(ax,limitsD)
else:
self.plotAtroposSelection(ax)
def plotAtroposSelection(self,ax):
"""Fill in the area to be extracted from the m/z axis
as arrival time or CCS.
:parameter ax: Matplotlib Axes object to use
"""
ylims = ax.get_ylim()
# if same limits all species
for sp in self.settings.atrOb.species.keys():
for z in self.settings.atrOb.simulatedSpecies[sp].charges:
limits = self._getMzLimits(sp,z)
ax.fill_betweenx(ylims,limits[0],
limits[1],color='red',alpha=0.1)
def plotAtroposSelectionAutoWidth(self,ax,limitsD):
"""Uses limits generated by: getAutoPeakLimits() instead of relative to
atropos generated fwhm values.
:parameter ax: Matplotlib Axes object to use
:parameter limitsD: Set to True to use automatic maximum width size
for extraction
"""
ylims = ax.get_ylim()
for sp,d in limitsD.items():
mass = self.settings.imOb.species[sp].mass
for z,limits in d.items():
mz = utils.get_mz(mass,z)
ax.fill_betweenx(ylims,limits[0],
limits[1],color='red',alpha=0.1)
def _getMzLimits(self,species,charge):
"""Get the m/z limits for a specific species charge state peak from
the Atropos fit.
:parameter species: Name of molecular species
:parameter charge: Charge state to get limits for
:returns: limits - in the form of [lower,upper]
"""
limits = self.settings.atrOb.simulatedSpecies[species].getPeakLimits(
charge,self.settings.widthL[species],self.settings.widthR[species])
return limits
################################################################
# Ion mobility (Extraction limits)
def getAutoPeakLimits(self):
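        """Generate maximum-width m/z extraction limits for every species and
        charge state; each ChargeStatePeak sizes its window against all the
        other peak positions present (via ChargeStatePeak.getLimits()).
        :returns: limitsD - OrderedDict of [lower,upper] m/z limits in the form
            d[speciesName][charge]
        """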
# Get all the mzs of the various species charge states
zsD,massD = self.settings.getChargeAndMassDictionaries()
mzsD = OrderedDict()
chargeStatePeakObs = []
allMzs = []
for sp, zs in zsD.items():
mzsD[sp] = []
for z in zs:
mz = utils.get_mz(massD[sp],z)
mzsD[sp].append(mz)
allMzs.append(mz)
# See if they overlap
limitsD = OrderedDict()
for sp,zs in zsD.items():
limitsD[sp] = OrderedDict()
for z in zs:
csOb = ChargeStatePeak.ChargeStatePeak(massD[sp],z,zs)
limits = csOb.getLimits(allMzs)
limitsD[sp][z] = limits
return limitsD
################################################################
################################################################
def plotAtdForRefreshPlot(self,ax):
"""Plot arrival time distribution(s). Handles plain, log
scale and difference atd contour plots.
:parameter ax: Matplotlib Axes instance
"""
matrix,x,y = self.settings.imOb._getMatrixWithinLimits()
if self.gui.choiceScale.GetSelection() == 1:
matrix = self.logScaleMatrix(matrix)
# Difference plot
if self.gui.checkboxDifferencePlot.IsChecked():
matrix2,x2,y2 = self.settings.imOb2._getMatrixWithinLimits()
if self.gui.choiceScale.GetSelection() == 1:
matrix2 = self.logScaleMatrix(matrix2)
x = np.concatenate((x,x2))
y = np.concatenate((y,y2))
matrix = matrix - matrix2
cmap = self.differenceCmap
vmin = self.vmax*-1
# Single plot
else:
cmap = self.cmap
vmin = 0
ax.imshow(matrix, origin=[0,0],aspect='auto',
extent=[x.min(),x.max(),y.min(),y.max()],
cmap=cmap,vmin=vmin,vmax=self.vmax)
ax.set_xlabel('$m/z$')
ax.set_ylabel('t$_d$ ($ms$)')
def plotCcsVsMzForRefreshPlot(self,ax,limitsD):
"""Plot contour plots. Deals with CCS vs. mass spectrum and
charge state clemmer CCS plot.
:parameter ax: Matplotlib Axes instance
:parameter limitsD: Dictionary of lists of [lower,upper] m/z values
for plotting extraction limits
"""
########
# Extracting data in the imOb
# Auto peak limits
if self.gui.checkboxAutoLimits.IsChecked():
self.settings.imOb.generateSpeciesSlicesExplicit(limitsD)
if self.settings.imOb2:
self.settings.imOb2.generateSpeciesSlicesExplicit(limitsD)
# Relative to FWHM zstate peak limits
else:
for i,sp in enumerate(self.settings.species):
if i:
self.settings.imOb.generateSpeciesSlicesFwhm(
sp,self.settings.widthL[sp],self.settings.widthR[sp])
if self.settings.imOb2:
self.settings.imOb2.generateSpeciesSlicesFwhm(
sp,self.settings.widthL[sp],self.settings.widthR[sp])
if self.gui.checkboxDifferencePlot.IsChecked():
self.settings.imOb.generateSpeciesSlicesFwhm(
sp,self.settings.widthL[sp],self.settings.widthR[sp])
if self.settings.imOb2:
self.settings.imOb2.generateSpeciesSlicesFwhm(
sp,self.settings.widthL[sp],self.settings.widthR[sp])
# getting data
dataSlices = self.settings.imOb.getDataSlices()
dataSlices = self.scalingDataSlices(dataSlices)
vmin = 0
cmap = self.cmap
# difference plot changes
if self.gui.checkboxDifferencePlot.IsChecked():
dataSlices2 = self.settings.imOb2.getDataSlices()
dataSlices2 = self.scalingDataSlices(dataSlices2)
dataSlices = self.getDataSliceDifferences(dataSlices,dataSlices2)
vmin = self.vmax*-1
cmap = self.differenceCmap
#######
# Plotting
if not self.gui.checkboxShowChargeStatePlot.IsChecked():
self.settings.imOb.plotCcsHeatMapFromDataSlices(
ax,self.settings.calibrationOb,dataSlices,
cmap=cmap,vmin=vmin,vmax=self.vmax)
# set xlims
xlims = self.getContourXlims()
ax.set_xlim(xlims)
ax.set_xlabel('$m/z$')
species = 0
# Clemmer charge state plot
else:
species = self.gui.choiceChargeStatePlotSpecies.GetStringSelection()
self.settings.imOb.plotChargeStateContourPlotsFromDataSlices(
ax,self.settings.calibrationOb,dataSlices,species,
vmin=vmin,vmax=self.vmax,cmap=cmap)
ax.set_xlabel('Charge State')
ax.set_xticks([])
ax.set_ylabel('CCS ($\AA^2$)')
ax.set_axis_bgcolor(self.backgroundColour)
if self.gui.checkboxShowPeakTops.IsChecked():
self.plotPeakTops(ax,species)
def getDataSliceDifferences(self,dataSlices1,dataSlices2):
"""Create a difference matrix and return as a data slice.
:parameter dataSlices1: imClasses.DataSlice() object
:parameter dataSlices2: imClasses.DataSlice() object
:returns: imClasses.DataSlices() object
"""
for sp,spSlices in dataSlices1.items():
for z,dataSlice in spSlices.items():
dataSlice.matrix -= dataSlices2[sp][z].matrix
return dataSlices1
def plotMsForRefreshPlot(self,limitsD):
"""Plot the mass spectrum and the extraction limits if relevant.
:parameter limitsD: Dictionary of m/z region limits for extraction
(not by this function).
"""
ax,ax2 = self.getDoubleAxes()
colour1 = 'k'
if self.gui.checkboxDifferencePlot.IsChecked():
colour1 = 'b'
self.settings.imOb2.massSpectrum.plot(ax2,color='r')
self.settings.imOb.massSpectrum.plot(ax2,color=colour1)
ax2.set_ylim([0,105])
if self.settings.atrOb:
self.plotMsExtractionLimits(ax2,limitsD)
return ax,ax2
def refresh_plot(self):
"""Update the plot area using the current GUI settings.
"""
# Autopeak limits (maximising charge state strip width)
limitsD = None
if self.gui.checkboxAutoLimits.IsChecked():
limitsD = self.getAutoPeakLimits()
# Mass Spectrum Panel
if self.gui.checkboxShowMassSpectrum.IsChecked():
ax,ax2 = self.plotMsForRefreshPlot(limitsD)
else:
ax = self.getSingleAxis()
ax2 = None
# ATD vs m/z
if self.gui.radioboxPlotPanel.GetSelection() == 0:
self.plotAtdForRefreshPlot(ax)
if ax2:
ax.set_xlim(ax2.get_xlim())
# CCS vs m/z
elif self.gui.radioboxPlotPanel.GetSelection() == 1:
self.plotCcsVsMzForRefreshPlot(ax,limitsD)
self.draw()
def plotPeakTops(self,ax,species=0):
"""Handles all possibilities for displaying the peak tops:
CCS vs. m/z (not clemmer plot) -> plotCcsVsMzPeakTops
"""
#### NOT FINISHED ####
# Get information from gui
smths = self.gui.textCtrlSmoothes.GetValue()
wlen = self.gui.textCtrlWlen.GetValue()
smths, wlen = int(smths), int(wlen)
limit = float(self.gui.textCtrlLimit.GetValue())
if not self.gui.checkboxShowChargeStatePlot.IsChecked():
for i,sp in enumerate(self.settings.species):
if i:
# CCS vs. m/z plot
if self.gui.radioboxPlotPanel.GetSelection() == 1:
                        # NEED AN IF STATEMENT FOR SUBTRACTION PLOT COLOUR
self.plotCcsVsMzPeakTops(ax,sp)
# ATD <=========== still to do
else:
# Clemmer plot (CCS ONLY AT THIS POINT)
self.settings.imOb.plotChargeStateContourPeaktops(
ax,self.settings.calibrationOb,species,smths,
wlen,limit=limit,dataType='ccs',colour='green')
# need if loops for ATD version and subtraction plot
def plotCcsVsMzPeakTops(self,ax,species,colour='green'):
"""Plot the peak tops found using first order derivative on
CCS against m/z contour plots.
:parameter ax: Matplotlib Axes() object
:parameter species: Molecular species name
:parameter colour: Matplotlib compatible colour string
"""
for z in self.settings.imOb.species[species].charges:
dataSlice = self.settings.imOb.dataSlices[species][z]
smths = self.gui.textCtrlSmoothes.GetValue()
wlen = self.gui.textCtrlWlen.GetValue()
smths, wlen = int(smths), int(wlen)
mz = dataSlice.getMsApex()
limit = float(self.gui.textCtrlLimit.GetValue())
ccsPeaks = dataSlice.getCcsPeaks(smths,wlen,mz,
self.settings.calibrationOb,
limit=limit)
ax.scatter([mz]*len(ccsPeaks),ccsPeaks,marker='o',color=colour,alpha=0.5)
def getContourXlims(self):
"""Get the m/z limits for the contour plot panel. Otherwise it
defaults to the extremities of the extracted Charge state slices
:returns: xlims - as [lower,upper]
"""
if self.gui.checkboxShowMassSpectrum.IsChecked():
xlims = self.axes[1].get_xlim()
else:
if self.settings.imOb:
xlimsL1 = self.settings.imOb.xaxis[0]
xlimsR1 = self.settings.imOb.xaxis[-1]
# should probably add another if for imOb2
xlims = [xlimsL1,xlimsR1]
return xlims
def refreshMsPlotOnly(self):
"""Update the mass spectrum panel of the plot alone.
"""
if self.gui.checkboxShowMassSpectrum.IsChecked():
self.axes[1].clear()
self.draw()
self.settings.imOb.massSpectrum.plot(self.axes[1])
if self.settings.atrOb:
self.plotAtroposSelection(self.axes[1])
self.draw()
def updateColourMap(self):
"""Update the colourmaps depending on what the background
background colour is.
"""
if self.backgroundColour == 'white':
self.cmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['white','b','k','k'],256)
self.cmap2 = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['white','r','k','k'],256)
elif self.backgroundColour == 'black':
self.cmap = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['k','b','w','w'],256)
self.cmap2 = mpl.colors.LinearSegmentedColormap.from_list(
'my_cmap',['k','r','w','w'],256)
def scalingDataSlices(self,dataSlices):
"""Change scaling between log and linear for intensity of dataSlices using
selection in ContourGui().
:parameter dataSlices: Dictionary of imClasses.DataSlice() objects, in the
form of d[speciesName][chargeState]
"""
if self.gui.choiceScale.GetSelection() == 1:
dataSlices = self.logScaleDataSlices(dataSlices)
# TODO(gns) - Shouldn't you do something to change back to non log scale?
return dataSlices
def logScaleMatrix(self,matrix):
"""Natural log matrix, do various manipulations to keep the
values between 0-100.
:parameter matrix: 2D numpy array of intensity values
"""
matrix = np.log(matrix)
matrixMin = np.min(matrix[np.isneginf(matrix)!=True])
matrixMax = np.max(matrix)
# correct for values under 0
matrix = matrix + matrixMin*-1
# Deal with non numerical results
matrix[np.isnan(matrix)] = 0.0
matrix[np.isneginf(matrix)] = 0.0
# scale the whole thing to 0-100
matrix = (matrix/(matrixMax+matrixMin*-1))*100.
return matrix
def logScaleDataSlices(self,dataSlices):
"""Natural log matrices, do various manipulations to keep the
values between 0-100.
:parameter dataSlices: Dictionary of imClasses.DataSlice() objects, in the
form of d[speciesName][chargeState]
"""
loggedMin = 0
loggedMax = 0
for sp,spSlices in dataSlices.items():
for z,dataSlice in spSlices.items():
matrix = np.log(dataSlices[sp][z].matrix)
# get minimum value for all dataSlices
# You need to ignore -inf values returned from logging 0
matrixMin = np.min(matrix[np.isneginf(matrix)!=True])
if matrixMin < loggedMin:
loggedMin = matrixMin
# log the maximum value for all the dataSlices for normalising to 100 later
matrixMax = np.max(matrix)
if matrixMax > loggedMax:
loggedMax = matrixMax
for sp,spSlices in dataSlices.items():
for z,dataSlice in spSlices.items():
# Log the matrix
dataSlices[sp][z].matrix = np.log(dataSlices[sp][z].matrix)
# correct for values under 0
dataSlices[sp][z].matrix = dataSlices[sp][z].matrix + loggedMin*-1
# Deal with non numerical results
dataSlices[sp][z].matrix[np.isnan(dataSlices[sp][z].matrix)] = 0.0
dataSlices[sp][z].matrix[np.isneginf(dataSlices[sp][z].matrix)] = 0.0
# scale the whole thing to 0-100
dataSlices[sp][z].matrix = (dataSlices[sp][z].matrix/(loggedMax+loggedMin*-1))*100.
return dataSlices
def draw(self):
"""Re-draw plot area.
"""
self.canvas.draw()
| gpl-2.0 |
0x0all/scikit-learn | sklearn/preprocessing/data.py | 10 | 39409 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import warn_if_not_float
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis)
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'Normalizer',
'OneHotEncoder',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
if isinstance(std_, np.ndarray):
std_[std_ == 0.0] = 1.0
elif std_ == 0.:
std_ = 1.
else:
std_ = None
return mean_, std_
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if sparse.issparse(X):
if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead."
                " See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
warn_if_not_float(X, estimator='The scale function')
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var[var == 0.0] = 1.0
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
warn_if_not_float(X, estimator='The scale function')
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
if with_std:
Xr /= std_
return X
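# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Column-wise standardization with scale(): the first column gets zero mean and
# unit variance, while the constant second column is only mean-centred because
# its zero standard deviation is reset to 1. Not executed on import; call manually.
def _demo_scale():
    X_demo = np.array([[1., 2.],
                       [3., 2.],
                       [5., 2.]])
    return scale(X_demo)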
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Standardizes features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The standardization is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This standardization is often used as an alternative to zero mean,
unit variance scaling.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default is True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False)
warn_if_not_float(X, estimator=self)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
# Do not scale constant features
if isinstance(data_range, np.ndarray):
data_range[data_range == 0.0] = 1.0
elif data_range == 0.:
data_range = 1.
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
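# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Applies the X_std / X_scaled formula from the class docstring to a small dense
# array; every column ends up spanning exactly [0, 1]. Call manually if desired.
def _demo_min_max_scaler():
    X_demo = np.array([[1., -1., 2.],
                       [2., 0., 0.],
                       [0., 1., -1.]])
    return MinMaxScaler(feature_range=(0, 1)).fit_transform(X_demo)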
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy, ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_[var == 0.0] = 1.0
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False)
if warn_if_not_float(X, estimator=self):
X = X.astype(np.float)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
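# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Fits the scaler on a small training set and reuses the stored mean_ and std_
# on a new sample, which is the intended fit/transform workflow. Call manually.
def _demo_standard_scaler():
    X_train = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    scaler = StandardScaler().fit(X_train)   # mean_ == [0.5, 0.5], std_ == [0.5, 0.5]
    return scaler.transform([[2., 2.]])      # -> [[3., 3.]]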
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ :
powers_[i, j] is the exponent of the jth input in the ith output.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _power_matrix(n_features, degree, interaction_only, include_bias):
"""Compute the matrix of polynomial powers"""
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
combn = chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
powers = np.vstack(np.bincount(c, minlength=n_features) for c in combn)
return powers
def fit(self, X, y=None):
"""
Compute the polynomial feature combinations
"""
n_samples, n_features = check_array(X).shape
self.powers_ = self._power_matrix(n_features, self.degree,
self.interaction_only,
self.include_bias)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.powers_.shape[1]:
raise ValueError("X shape does not match training shape")
return (X[:, None, :] ** self.powers_).prod(-1)
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy)
warn_if_not_float(X, 'The normalize function')
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
norms[norms == 0.0] = 1.0
elif norm == 'l2':
norms = row_norms(X)
norms[norms == 0.0] = 1.0
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
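# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Contrasts l2 and l1 row normalization on the same two samples. Call manually.
def _demo_normalize():
    V = np.array([[3., 4.],
                  [1., 0.]])
    # l2 rows become [0.6, 0.8] and [1., 0.]; l1 rows become [3/7, 4/7] and [1., 0.]
    return normalize(V, norm='l2'), normalize(V, norm='l1')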
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Parameters
----------
norm : 'l1' or 'l2', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
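# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Values strictly above the threshold map to 1, everything else to 0. Call manually.
def _demo_binarize():
    B = np.array([[0.4, -1.2, 2.5],
                  [0.9, 0.0, 0.1]])
    return binarize(B, threshold=0.5)   # -> [[0., 0., 1.], [1., 0., 0.]]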
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default is True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
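# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Checks the equivalence stated in the class docstring: centering a linear kernel
# gives the same matrix as computing the kernel of mean-centred features.
def _demo_kernel_centerer():
    F = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K = np.dot(F, F.T)                            # linear kernel, phi(x) = x
    K_centered = KernelCenterer().fit(K).transform(K)
    F_centered = F - F.mean(axis=0)
    return np.allclose(K_centered, np.dot(F_centered, F_centered.T))  # True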
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
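# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Applies a toy transform (doubling) to column 0 only; the untouched column is
# stacked to the right, which is the behaviour OneHotEncoder below relies on.
def _demo_transform_selected():
    X_demo = np.array([[1., 10.],
                       [2., 20.]])
    # -> [[ 2., 10.], [ 4., 20.]]
    return _transform_selected(X_demo, lambda Z: 2 * Z, selected=[0])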
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
    a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, selected via the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
Barmaley-exe/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
Querschlag/tickmate | analysis/tmkit/linear_regression.py | 5 | 2249 | import sqlite3
from sklearn import linear_model
import numpy as np
import pandas as pd
import datetime
import sys
conn = sqlite3.connect(sys.argv[1])
c = conn.cursor();
c.execute("select _id, name from tracks")
rows = c.fetchall()
track_names = pd.DataFrame([{'track_name': row[1]} for row in rows])
track_ids = [int(row[0]) for row in rows]
track_cnt = len(track_ids)
print "Found {0} tracks.".format(track_cnt)
c.execute("select * from ticks")
last_tick = c.fetchall()[-1]
last_day = datetime.date(last_tick[2], last_tick[3], last_tick[4])
def window(day, n=20):
"return a matrix of the last `n` days before day `day`"
tick_date = "date(year || '-' || substr('0' || month, -2, 2) || " + \
"'-' || substr('0' || day, -2, 2))"
max_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day)
min_date = "date('{d.year:04d}-{d.month:02d}-{d.day:02d}')".\
format(d=day-datetime.timedelta(n))
c.execute("select * from ticks where {d} <= {max_date} and {d} >= {min_date}".\
format(d=tick_date, max_date=max_date, min_date=min_date))
# ticktrix is the matrix containing the ticks
ticktrix = np.zeros((n, track_cnt))
for row in c.fetchall():
print row
try:
row_date = datetime.date(row[2], row[3], row[4])
except ValueError:
print "Error constructing date from", row
x = -(row_date - day).days
y = track_ids.index(int(row[1]))
if x < n:
ticktrix[x, y] = 1
return ticktrix
last_day -= datetime.timedelta(1)
print "Fitting for day:", last_day
my_window = window(last_day)
target_data = my_window[0,:].T
training_data = my_window[1:,:].T
print "Target:", target_data.shape
print target_data
print "Training:", training_data.shape
print training_data
reg = linear_model.LinearRegression()
reg.fit(training_data, target_data)
print "Coefficents:", reg.coef_.shape
print reg.coef_
print "Applied to training data:"
print np.dot(training_data, reg.coef_)
print "Forecast"
#print np.dot(my_window[:19,:].T, reg.coef_)
#print track_names
df = pd.DataFrame()
df['track'] = track_names
df['prob'] = pd.Series(np.dot(my_window[:19,:].T, reg.coef_) * 100.0)
print df
| gpl-3.0 |
leal26/AeroPy | examples/morphing/flight_conditions/morphed/range_constant_velocity.py | 2 | 6556 | import aeropy.xfoil_module as xf
from aeropy.geometry.airfoil import CST, create_x
from aeropy.morphing.camber_2D import *
from aeropy.aero_module import air_properties, Reynolds, LLT_calculator
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import scipy
def aircraft_range_varying_AOA(f_L, f_LD, velocity):
def to_integrate(weight):
# velocity = 0.514444*108 # m/s (113 KTAS)
def calculate_AOA(velocity):
def residual(AOA):
CL = f_L([velocity, AOA[0]])[0]
span = 11
AR = 7.32
chord_root = span/AR
dynamic_pressure = .5*density*velocity**2*(span*chord_root)
return abs(CL - weight/dynamic_pressure)
if len(AOA_list) == 0:
x0 = 0
else:
x0 = AOA_list[-1]
res = scipy.optimize.minimize(residual, x0, bounds = [[0, 12],])#, options={'ftol':1e-9})
return res.x[0]
AOA = calculate_AOA(velocity)
# print(AOA)
AOA_list.append(AOA)
lift_to_drag = f_LD([velocity, AOA])
span = 10.9728
RPM = 1800
a = 0.3089 # (lb/hr)/BTU
b = 0.008*RPM+19.607 # lb/hr
lbhr_to_kgs = 0.000125998
BHP_to_watt = 745.7
eta = 0.85
thrust = weight/lift_to_drag
power_SI = thrust*velocity/eta
power_BHP = power_SI/BHP_to_watt
mass_flow = (a*power_BHP + b)
mass_flow_SI = mass_flow*lbhr_to_kgs
SFC = mass_flow_SI/thrust
dR = velocity/g/SFC*lift_to_drag/weight
return dR*0.001 # *0.0005399
AOA_list = []
g = 9.81 # kg/ms
fuel = 56*6.01*0.4535*g
initial_weight = 1111*g
final_weight = initial_weight-fuel
x = np.linspace(final_weight, initial_weight, 100)
y = []
for x_i in x:
y.append(to_integrate(x_i)[0])
range = scipy.integrate.simps(y, x)
return range, AOA_list
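# --- Illustrative usage sketch (editor's addition, not from the original script) ---
# The range integrator above only needs two interpolators over (velocity, AOA).
# The synthetic lift and lift-to-drag surfaces below are made-up placeholders for
# the real aerodynamic tables that the script loads further down.
def _demo_range_estimate():
    v_grid = np.linspace(10, 80, 8)
    a_grid = np.linspace(-2, 14, 17)
    aa = np.tile(a_grid, (v_grid.size, 1))   # shape (len(v_grid), len(a_grid))
    cl_toy = 0.1 + 0.08*aa                   # toy linear lift curve
    ld_toy = 10. + 0.5*aa - 0.05*aa**2       # toy lift-to-drag surface
    f_L_toy = RegularGridInterpolator((v_grid, a_grid), cl_toy)
    f_LD_toy = RegularGridInterpolator((v_grid, a_grid), ld_toy)
    return aircraft_range_varying_AOA(f_L_toy, f_LD_toy, velocity=40.)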
# ==============================================================================
# Inputs
# ==============================================================================
altitude = 10000 # ft
air_props = air_properties(altitude, unit='feet')
density = air_props['Density']
# data = pandas.read_csv('performance_grid.csv')
# psi_spars = [0.1, 0.3, 0.6, 0.8]
# c_P = 1.0
# ranges = []
# for i in range(len(data.values)):
# AC = data.values[i,0:4]
# velocity = data.values[i,-4]
# AOA = data.values[i,-5]
# cl= data.values[i,-3]
# cd = data.values[i,-2]
# CL, CD = coefficient_LLT(AC, velocity, AOA)
# data.values[i, -3] = CL
# data.values[i, -2] = CD
# data.values[i, -1] = CL/CD
# print(i, CL, CD)
# data = data.drop_duplicates()
import pickle
# f = open('wing.p', 'wb')
# pickle.dump(data, f)
# f.close()
state = 'morphed'
concepts = ['NACA0012', 'NACA4415', 'NACA641212', 'glider']
#
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# # print(aoa)
# # print(velocity)
# # print(cl)
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = concept)
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('cl')
# plt.show()
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,100)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = concept)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()
range_data = {}
plt.figure()
for concept in concepts:
data = np.loadtxt('./'+state + '_' + concept + '.txt')
aoa = np.unique(data[:,0])
velocity = np.unique(data[:,1])
cl = data[:,2].reshape([200,200])
LD_ratio = data[:,3].reshape([200,200])
f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = np.linspace(20, 65, 7)
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = velocity[i])
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = velocity[i])
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
ranges = []
# velocity = np.linspace(20, 60, 5)
for i in range(len(velocity)):
range_i, AOA_i = aircraft_range_varying_AOA(f_L, f_LD, velocity[i])
# plt.plot(np.arange(len(AOA_i)), AOA_i, label=velocity[i])
# plt.scatter(np.arange(len(AOA_i)),AOA_i)
print(i, velocity[i], range_i)
ranges.append(range_i)
# print(velocity[36])
range_data[concept] = ranges
plt.plot(velocity, ranges, lw=2, label=concept)
f = open('ranges_velocity.p', 'wb')
pickle.dump(range_data, f)
f.close()
# plt.xlim(min(velocity), max(velocity))
# plt.ylim(min(ranges), max(ranges))
plt.xlabel('Velocity (m/s)')
plt.ylabel('Range (km)')
plt.legend()
plt.show()
| mit |
dbtsai/spark | python/pyspark/sql/pandas/serializers.py | 3 | 11734 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Serializers for PyArrow and pandas conversions. See `pyspark.serializers` for more details.
"""
from pyspark.serializers import Serializer, read_int, write_int, UTF8Deserializer
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class ArrowCollectSerializer(Serializer):
"""
Deserialize a stream of batches followed by batch order information. Used in
PandasConversionMixin._collect_as_arrow() after invoking Dataset.collectAsArrowToPython()
in the JVM.
"""
def __init__(self):
self.serializer = ArrowStreamSerializer()
def dump_stream(self, iterator, stream):
return self.serializer.dump_stream(iterator, stream)
def load_stream(self, stream):
"""
Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields
a list of indices that can be used to put the RecordBatches in the correct order.
"""
# load the batches
for batch in self.serializer.load_stream(stream):
yield batch
# load the batch order indices or propagate any error that occurred in the JVM
num = read_int(stream)
if num == -1:
error_msg = UTF8Deserializer().loads(stream)
raise RuntimeError("An error occurred while calling "
"ArrowCollectSerializer.load_stream: {}".format(error_msg))
batch_order = []
for i in range(num):
index = read_int(stream)
batch_order.append(index)
yield batch_order
def __repr__(self):
return "ArrowCollectSerializer(%s)" % self.serializer
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
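# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Round-trips one Arrow record batch through an in-memory buffer to show the
# dump_stream/load_stream contract. Assumes pyarrow is installed; call manually.
def _demo_arrow_stream_roundtrip():
    import io
    import pyarrow as pa
    batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], ["x"])
    buf = io.BytesIO()
    ser = ArrowStreamSerializer()
    ser.dump_stream(iter([batch]), buf)
    buf.seek(0)
    return list(ser.load_stream(buf))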
class ArrowStreamPandasSerializer(ArrowStreamSerializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
:param timezone: A timezone to respect when handling timestamp values
:param safecheck: If True, conversion from Arrow to Pandas checks for overflow/truncation
:param assign_cols_by_name: If True, then Pandas DataFrames will get columns by name
"""
def __init__(self, timezone, safecheck, assign_cols_by_name):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
self._safecheck = safecheck
self._assign_cols_by_name = assign_cols_by_name
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.pandas.types import _check_series_localize_timestamps
import pyarrow
# If the given column is a date type column, creates a series of datetime.date directly
# instead of creating datetime64[ns] as intermediate data to avoid overflow caused by
# datetime64[ns] type handling.
s = arrow_column.to_pandas(date_as_object=True)
if pyarrow.types.is_timestamp(arrow_column.type):
return _check_series_localize_timestamps(s, self._timezone)
else:
return s
def _create_batch(self, series):
"""
Create an Arrow record batch from the given pandas.Series or list of Series,
with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:return: Arrow RecordBatch
"""
import pandas as pd
import pyarrow as pa
from pyspark.sql.pandas.types import _check_series_convert_timestamps_internal
from pandas.api.types import is_categorical
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s, self._timezone)
elif is_categorical(s.dtype):
# Note: This can be removed once minimum pyarrow version is >= 0.16.1
s = s.astype(s.dtypes.categories.dtype)
try:
array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck)
except pa.ArrowException as e:
error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \
"Array (%s). It can be caused by overflows or other unsafe " + \
"conversions warned by Arrow. Arrow safe type check can be " + \
"disabled by using SQL config " + \
"`spark.sql.execution.pandas.convertToArrowArraySafely`."
raise RuntimeError(error_msg % (s.dtype, t), e)
return array
arrs = []
for s, t in series:
if t is not None and pa.types.is_struct(t):
if not isinstance(s, pd.DataFrame):
raise ValueError("A field of type StructType expects a pandas.DataFrame, "
"but got: %s" % str(type(s)))
# Input partition and result pandas.DataFrame empty, make empty Arrays with struct
if len(s) == 0 and len(s.columns) == 0:
arrs_names = [(pa.array([], type=field.type), field.name) for field in t]
# Assign result columns by schema name if user labeled with strings
elif self._assign_cols_by_name and any(isinstance(name, str)
for name in s.columns):
arrs_names = [(create_array(s[field.name], field.type), field.name)
for field in t]
# Assign result columns by position
else:
arrs_names = [(create_array(s[s.columns[i]], field.type), field.name)
for i, field in enumerate(t)]
struct_arrs, struct_names = zip(*arrs_names)
arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names))
else:
arrs.append(create_array(s, t))
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in range(len(arrs))])
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
batches = (self._create_batch(series) for series in iterator)
super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream)
import pyarrow as pa
for batch in batches:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class ArrowStreamPandasUDFSerializer(ArrowStreamPandasSerializer):
"""
Serializer used by Python worker to evaluate Pandas UDFs
"""
def __init__(self, timezone, safecheck, assign_cols_by_name, df_for_struct=False):
super(ArrowStreamPandasUDFSerializer, self) \
.__init__(timezone, safecheck, assign_cols_by_name)
self._df_for_struct = df_for_struct
def arrow_to_pandas(self, arrow_column):
import pyarrow.types as types
if self._df_for_struct and types.is_struct(arrow_column.type):
import pandas as pd
series = [super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(column)
.rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
s = pd.concat(series, axis=1)
else:
s = super(ArrowStreamPandasUDFSerializer, self).arrow_to_pandas(arrow_column)
return s
def dump_stream(self, iterator, stream):
"""
Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent.
This should be sent after creating the first record batch so in case of an error, it can
be sent back to the JVM before the Arrow stream starts.
"""
def init_stream_yield_batches():
should_write_start_length = True
for series in iterator:
batch = self._create_batch(series)
if should_write_start_length:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
should_write_start_length = False
yield batch
return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
def __repr__(self):
return "ArrowStreamPandasUDFSerializer"
class CogroupUDFSerializer(ArrowStreamPandasUDFSerializer):
def load_stream(self, stream):
"""
Deserialize Cogrouped ArrowRecordBatches to a tuple of Arrow tables and yield as two
lists of pandas.Series.
"""
import pyarrow as pa
dataframes_in_group = None
while dataframes_in_group is None or dataframes_in_group > 0:
dataframes_in_group = read_int(stream)
if dataframes_in_group == 2:
batch1 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
batch2 = [batch for batch in ArrowStreamSerializer.load_stream(self, stream)]
yield (
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch1).itercolumns()],
[self.arrow_to_pandas(c) for c in pa.Table.from_batches(batch2).itercolumns()]
)
elif dataframes_in_group != 0:
raise ValueError(
'Invalid number of pandas.DataFrames in group {0}'.format(dataframes_in_group))
| apache-2.0 |
bendudson/BOUT-0.8 | pylib/boututils/showdata.py | 2 | 4241 | # Display animations of data, similar to the IDL routine
# of the same name
#
# Ben Dudson, University of York, July 2009
#
#
try:
print "Trying to import GTK..."
import gobject
widget = "gtk"
except:
print "failed\nTrying to import WX..."
try:
        import wx
        from wx import *
widget = "wx"
except:
print "failed."
raise
try:
import matplotlib
if widget == "gtk":
matplotlib.use('GTKAgg')
else:
matplotlib.use('WXAgg') # do this before importing pylab
import numpy as np
import matplotlib.pyplot as plt
except ImportError:
print "ERROR: Showdata needs numpy, matplotlib and gobject modules"
raise
def showdata(data, scale=True, loop=False):
"""Animate a dataset, with time as first dimension
2D - Show a line plot
3D - Show a surface plot
scale = True Use the same scale for all times
loop = False Loop the dataset
"""
size = data.shape
ndims = len(size)
fig = plt.figure()
ax = fig.add_subplot(111)
if ndims == 2:
# Animate a line plot
if widget == "gtk":
# GTK method (default)
def animate():
line, = ax.plot(data[0,:])
if scale:
# Get data range
ax.set_ylim([np.min(data), np.max(data)])
while True:
for i in np.arange(size[0]):
line.set_ydata(data[i,:])
if not scale:
ax.set_ylim([np.min(data[i,:]), np.max(data[i,:])])
fig.canvas.draw()
yield True
if not loop: break
gobject.idle_add(lambda iter=animate(): iter.next())
else:
# WX widgets method
line, = ax.plot(data[0,:])
def animate(idleevent):
if scale:
# Get data range
ax.set_ylim([np.min(data), np.max(data)])
if animate.i == size[0]:
wx.GetApp().Bind(wx.EVT_IDLE, None)
return False
line.set_ydata(data[animate.i,:])
if not scale:
                ax.set_ylim([np.min(data[animate.i,:]), np.max(data[animate.i,:])])
fig.canvas.draw_idle()
animate.i += 1
animate.i = 0
wx.GetApp().Bind(wx.EVT_IDLE, animate)
plt.show()
elif ndims == 3:
# Animate a contour plot
if widget == "gtk":
def animate():
cmap = None
m = plt.imshow(data[0,:,:], interpolation='bilinear', cmap=cmap, animated=True)
while True:
for i in np.arange(size[0]):
m.set_data(data[i,:,:])
fig.canvas.draw()
yield True
if not loop: break
gobject.idle_add(lambda iter=animate(): iter.next())
else:
# WX widgets method
cmap = None
m = plt.imshow(data[0,:,:], interpolation='bilinear', cmap=cmap, animated=True)
def animateContour(idleevent):
if animateContour.i == size[0]:
wx.GetApp().Bind(wx.EVT_IDLE, None)
return False
m.set_data(data[animateContour.i,:,:])
fig.canvas.draw_idle()
animateContour.i += 1
animateContour.i = 0
wx.GetApp().Bind(wx.EVT_IDLE, animateContour)
plt.show()
else:
print "Sorry can't handle this number of dimensions"
def test():
x = np.arange(0, 2*np.pi, 0.01)
t = np.arange(0, 2*np.pi, 0.1)
nt = len(t)
nx = len(x)
# Animated line plots
data = np.zeros([nt,nx])
for i in np.arange(nt):
data[i,:] = np.sin(x+i/10.0) * np.sin(10*x-i/5.0)
showdata(data, loop=True)
# Animated 2D plot
y = x
ny = len(y)
data2d = np.zeros([nt,nx,ny])
for i in np.arange(ny):
data2d[:,:,i] = data * np.sin(y[i])
showdata(data2d, loop=True)
| gpl-3.0 |
wjfwzzc/gensim_demo | LDA/sklearn_lda.py | 1 | 2091 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import operator
import sklearn.decomposition
import sklearn.feature_extraction.text
import sklearn.metrics.pairwise
try:
import preprocess
except ImportError:
from LDA import preprocess
courses_name = preprocess.courses_name
query_idx = 210
texts = preprocess.texts
texts = map(lambda text: " ".join(text), texts)
def build_nmf():
global query, sims
vectorizer = sklearn.feature_extraction.text.TfidfVectorizer()
corpus_tfidf = vectorizer.fit_transform(texts)
nmf = sklearn.decomposition.NMF(n_components=20, verbose=1)
corpus = nmf.fit_transform(corpus_tfidf)
index = sklearn.metrics.pairwise.cosine_distances(corpus)
query = corpus[query_idx]
sims = index[query_idx]
feature_names = vectorizer.get_feature_names()
print_top_words(nmf, feature_names)
def build_lda():
global query, sims
vectorizer = sklearn.feature_extraction.text.CountVectorizer()
corpus_tf = vectorizer.fit_transform(texts)
lda = sklearn.decomposition.LatentDirichletAllocation(n_topics=100, learning_method="online", max_iter=100,
evaluate_every=10, verbose=1, doc_topic_prior=50 / 100,
topic_word_prior=0.01)
corpus = lda.fit_transform(corpus_tf)
index = sklearn.metrics.pairwise.cosine_distances(corpus)
query = corpus[query_idx]
sims = index[query_idx]
feature_names = vectorizer.get_feature_names()
print_top_words(lda, feature_names)
def print_top_words(model, feature_names):
for topic_idx, topic in enumerate(model.components_):
print "Topic #%d:" % topic_idx
print " ".join([feature_names[i] for i in topic.argsort()[:-10 - 1:-1]])
def output():
print courses_name[query_idx], query
sort_sims = sorted(enumerate(sims), key=operator.itemgetter(1), reverse=True)
for idx in sort_sims[:10]:
print courses_name[idx[0]], idx
# build_nmf()
build_lda()
output()
| mit |
blackball/an-test6 | util/plotSipDistortion.py | 1 | 3343 | #! /usr/bin/env python
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import sys
from math import sqrt, floor, ceil
from astrometry.util.util import Sip
from optparse import OptionParser
def plotDistortion(sip, W, H, ncells, exaggerate=1., doclf=True):
'''
Produces a plot showing the SIP distortion that was found, by drawing
a grid and distorting it. Allows exaggeration of the distortion for ease
of visualization.
sip -- an astrometry.util.Sip object
(duck-type: has "(dx,dy) = sip.get_distortion(x,y)")
W, H -- the image size
ncells -- the approximate number of grid cells to split the image into.
exaggerate -- the factor by which to exaggerate the distortion.
'''
ncells = float(ncells)
cellsize = sqrt(W * H / ncells)
nw = int(floor(W / cellsize))
nh = int(floor(H / cellsize))
#print 'Grid cell size', cellsize
#print 'N cells', nw, 'x', nh
cx = np.arange(nw+1) * cellsize + ((W - (nw*cellsize))/2.)
cy = np.arange(nh+1) * cellsize + ((H - (nh*cellsize))/2.)
# pixel step size for grid lines
step = 50
nx = int(np.ceil(W / float(step)))
ny = int(np.ceil(H / float(step)))
#xx = np.arange(-step, W+2*step, step)
#yy = np.arange(-step, H+2*step, step)
xx = np.linspace(1, W, nx)
yy = np.linspace(1, H, ny)
if doclf:
plt.clf()
for y in cy:
dx,dy = [],[]
for x in xx:
dxi,dyi = sip.get_distortion(x, y)
dx.append(dxi)
dy.append(dyi)
plt.plot(xx, y*np.ones_like(xx), 'k-', zorder=10)
dx = np.array(dx)
dy = np.array(dy)
if exaggerate != 1:
dx += (exaggerate * (dx - xx))
dy += (exaggerate * (dy - y))
plt.plot(dx, dy, 'r-', zorder=20)
for x in cx:
dx,dy = [],[]
for y in yy:
dxi,dyi = sip.get_distortion(x, y)
dx.append(dxi)
dy.append(dyi)
plt.plot(x*np.ones_like(yy), yy, 'k-', zorder=10)
dx = np.array(dx)
dy = np.array(dy)
if exaggerate != 1:
dx += (exaggerate * (dx - x))
dy += (exaggerate * (dy - yy))
plt.plot(dx, dy, 'r-', zorder=20, clip_on=False)
plt.axis('scaled')
plt.axis([0, W, 0, H])
def plotDistortionFile(sipfn, ext, ncells, **kwargs):
wcs = Sip(sipfn, ext)
if wcs is None:
raise RuntimeError('Failed to open WCS file %s' % sipfn)
plotDistortion(wcs, wcs.get_width(), wcs.get_height(), ncells, **kwargs)
if __name__ == '__main__':
parser = OptionParser('usage: %prog [options] <wcs-filename> <plot-output-filename>')
parser.add_option('-e', dest='ext', type='int', help='FITS extension to read WCS from (default 0)')
parser.add_option('-x', dest='exaggerate', type='float', help='Exaggeration factor')
parser.add_option('-c', dest='cells', type='int', help='Approx. number of pieces to cut image into (default:18)')
parser.set_defaults(ext=0, cells=18, exaggerate=1.)
opt,args = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(-1)
wcsfn = args[0]
outfn = args[1]
plotDistortionFile(wcsfn, opt.ext, opt.cells, exaggerate=opt.exaggerate)
plt.savefig(outfn)
| gpl-2.0 |
Funtimezzhou/TradeBuildTools | SAT eBook/chapter15/intraday_mr.py | 3 | 5391 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# intraday_mr.py
from __future__ import print_function
import datetime
import numpy as np
import pandas as pd
import statsmodels.api as sm
from strategy import Strategy
from event import SignalEvent
from backtest import Backtest
from hft_data import HistoricCSVDataHandlerHFT
from hft_portfolio import PortfolioHFT
from execution import SimulatedExecutionHandler
class IntradayOLSMRStrategy(Strategy):
"""
Uses ordinary least squares (OLS) to perform a rolling linear
regression to determine the hedge ratio between a pair of equities.
The z-score of the residuals time series is then calculated in a
rolling fashion and if it exceeds an interval of thresholds
    (defaulting to [0.5, 3.0]) then a long/short signal pair is generated
    (for the high threshold) or an exit signal pair is generated (for the
low threshold).
"""
def __init__(
self, bars, events, ols_window=100,
zscore_low=0.5, zscore_high=3.0
):
"""
Initialises the stat arb strategy.
Parameters:
bars - The DataHandler object that provides bar information
events - The Event Queue object.
"""
self.bars = bars
self.symbol_list = self.bars.symbol_list
self.events = events
self.ols_window = ols_window
self.zscore_low = zscore_low
self.zscore_high = zscore_high
self.pair = ('AREX', 'WLL')
self.datetime = datetime.datetime.utcnow()
self.long_market = False
self.short_market = False
def calculate_xy_signals(self, zscore_last):
"""
Calculates the actual x, y signal pairings
to be sent to the signal generator.
Parameters
zscore_last - The current zscore to test against
"""
y_signal = None
x_signal = None
p0 = self.pair[0]
p1 = self.pair[1]
dt = self.datetime
hr = abs(self.hedge_ratio)
# If we're long the market and below the
# negative of the high zscore threshold
if zscore_last <= -self.zscore_high and not self.long_market:
self.long_market = True
y_signal = SignalEvent(1, p0, dt, 'LONG', 1.0)
x_signal = SignalEvent(1, p1, dt, 'SHORT', hr)
# If we're long the market and between the
# absolute value of the low zscore threshold
if abs(zscore_last) <= self.zscore_low and self.long_market:
self.long_market = False
y_signal = SignalEvent(1, p0, dt, 'EXIT', 1.0)
x_signal = SignalEvent(1, p1, dt, 'EXIT', 1.0)
# If we're short the market and above
# the high zscore threshold
if zscore_last >= self.zscore_high and not self.short_market:
self.short_market = True
y_signal = SignalEvent(1, p0, dt, 'SHORT', 1.0)
x_signal = SignalEvent(1, p1, dt, 'LONG', hr)
# If we're short the market and between the
# absolute value of the low zscore threshold
if abs(zscore_last) <= self.zscore_low and self.short_market:
self.short_market = False
y_signal = SignalEvent(1, p0, dt, 'EXIT', 1.0)
x_signal = SignalEvent(1, p1, dt, 'EXIT', 1.0)
return y_signal, x_signal
def calculate_signals_for_pairs(self):
"""
Generates a new set of signals based on the mean reversion
strategy.
Calculates the hedge ratio between the pair of tickers.
We use OLS for this, althought we should ideall use CADF.
"""
# Obtain the latest window of values for each
# component of the pair of tickers
y = self.bars.get_latest_bars_values(
self.pair[0], "close", N=self.ols_window
)
x = self.bars.get_latest_bars_values(
self.pair[1], "close", N=self.ols_window
)
if y is not None and x is not None:
# Check that all window periods are available
if len(y) >= self.ols_window and len(x) >= self.ols_window:
# Calculate the current hedge ratio using OLS
self.hedge_ratio = sm.OLS(y, x).fit().params[0]
# Calculate the current z-score of the residuals
spread = y - self.hedge_ratio * x
zscore_last = ((spread - spread.mean())/spread.std())[-1]
# Calculate signals and add to events queue
y_signal, x_signal = self.calculate_xy_signals(zscore_last)
if y_signal is not None and x_signal is not None:
self.events.put(y_signal)
self.events.put(x_signal)
def calculate_signals(self, event):
"""
Calculate the SignalEvents based on market data.
"""
if event.type == 'MARKET':
self.calculate_signals_for_pairs()
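# Illustrative sketch (added; not part of the original strategy): the same
# hedge-ratio and z-score computation as calculate_signals_for_pairs, applied
# to synthetic prices. The name, window and coefficients are assumptions for
# demonstration only, and the helper is never called by the backtest.
def _zscore_demo(ols_window=100):
    """Return (hedge_ratio, latest z-score) for two synthetic series."""
    rng = np.random.RandomState(0)
    x = 100.0 + np.cumsum(rng.randn(ols_window))   # synthetic price series
    y = 0.8 * x + rng.randn(ols_window)            # noisy linear partner
    hedge_ratio = sm.OLS(y, x).fit().params[0]     # same OLS fit as above
    spread = y - hedge_ratio * x
    zscore_last = ((spread - spread.mean()) / spread.std())[-1]
    return hedge_ratio, zscore_last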
if __name__ == "__main__":
csv_dir = '/path/to/your/csv/file' # CHANGE THIS!
symbol_list = ['AREX', 'WLL']
initial_capital = 100000.0
heartbeat = 0.0
start_date = datetime.datetime(2007, 11, 8, 10, 41, 0)
backtest = Backtest(
csv_dir, symbol_list, initial_capital, heartbeat,
start_date, HistoricCSVDataHandlerHFT, SimulatedExecutionHandler,
PortfolioHFT, IntradayOLSMRStrategy
)
backtest.simulate_trading()
| gpl-3.0 |
Garrett-R/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
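# Quick numeric check (added for illustration; not in the original example):
# the per-task support sizes show that the independent Lasso fits scatter
# their selected features across tasks, while MultiTaskLasso keeps the same
# features for every task.
print("Mean support size per task (Lasso):          %.1f"
      % np.mean(np.sum(coef_lasso_ != 0, axis=1)))
print("Mean support size per task (MultiTaskLasso): %.1f"
      % np.mean(np.sum(coef_multi_task_lasso_ != 0, axis=1)))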
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
shangwuhencc/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
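# Added for illustration (not in the original example): the hashed
# representation is very high-dimensional and sparse. fit_transform returns a
# scipy sparse matrix, so its shape and density can be reported directly.
print("Transformed shape: %s, density: %.4f"
      % (X_transformed.shape,
         X_transformed.nnz / float(X_transformed.shape[0] * X_transformed.shape[1])))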
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
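# Added for illustration (not in the original example): compare how far each
# estimate is from the true weights; the Bayesian ridge coefficients are
# typically closer to the (mostly zero) ground truth than the OLS ones.
print("Squared coefficient error (Bayesian Ridge): %.3f" % np.sum((clf.coef_ - w) ** 2))
print("Squared coefficient error (OLS):            %.3f" % np.sum((ols.coef_ - w) ** 2))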
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
cuttlefishh/papers | red-sea-spatial-series/code/bokeh_html.py | 1 | 5994 | #!/usr/bin/env python
import pandas as pd
import click
from bokeh.io import vform
from bokeh.plotting import figure, show, output_file
from bokeh.models import CustomJS, ColumnDataSource
from bokeh.models.widgets import Select
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9)
standard_palettes = dict([("Blues", Blues9), ("BrBG", BrBG9),
("BuGn", BuGn9), ("BuPu", BuPu9),
("GnBu", GnBu9), ("Greens", Greens9),
("Greys", Greys9), ("OrRd", OrRd9),
("Oranges", Oranges9), ("PRGn", PRGn9),
("PiYG", PiYG9), ("PuBu", PuBu9),
("PuBuGn", PuBuGn9), ("PuOr", PuOr9),
("PuRd", PuRd9), ("Purples", Purples9),
("RdBu", RdBu9), ("RdGy", RdGy9),
("RdPu", RdPu9), ("RdYlBu", RdYlBu9),
("RdYlGn", RdYlGn9), ("Reds", Reds9),
("Spectral", Spectral9), ("YlGn", YlGn9),
("YlGnBu", YlGnBu9), ("YlOrBr", YlOrBr9),
("YlOrRd", YlOrRd9)])
@click.command()
@click.option('--input_fp', '-i', type=click.Path(
exists=True, dir_okay=False, readable=True, resolve_path=True),
help='Input metadata file')
@click.option('--output_fp', '-o', type=click.Path(
dir_okay=False, writable=True, resolve_path=True),
help='Output filepath')
@click.option('--title', '-t', type=str, help='Title of the graph')
@click.option('--seperator', '-s', required=False, type=str, default=',',
              help='Separator used in the file (Default ",")')
@click.option('--color_by', '-c', required=False, type=str,
help='Column name to color the data by (Default None)')
@click.option('--palette', '-p', required=False,
help='Color palette to use, or string in the form '
'group1:color,group2:color\n'
                   'Possible palettes:\n' + ', '.join(standard_palettes.keys()))
@click.option('--legend_pos', '-l', required=False, default='top_right',
type=click.Choice(
['top_right', 'top_left', 'bottom_right', 'bottom_left']),
              help='Position of the legend (Default "top_right")')
@click.argument('remcols', nargs=-1, type=str)
def build_bokeh(input_fp, output_fp, title, seperator=',', remcols=None,
color_by=None, palette=None, legend_pos='top_right'):
data = pd.DataFrame.from_csv(input_fp, sep=seperator)
# Put depth as first column
cols = data.columns.tolist()
popped = cols.pop(7)
cols.insert(0, popped)
data = data[cols]
# Change odd depths to regular ones
data['Depth (m)'].replace(47, 50, inplace=True)
#data['Depth (m)'].replace(258, 200, inplace=True)
# Drop unwanted columns if needed
if remcols:
remcols = list(remcols)
data.drop(remcols, axis=1, inplace=True)
# Build out the colors for the graph if needed
legend = []
if color_by is not None:
groups = data.groupby(color_by).groups
group_list = sorted(groups.keys(), reverse=True)
# Grab colormap or use provided
if len(palette.split(',')) > 1:
# Format into colormap-like object
p = palette.split(',')
hold = dict(x.split(':') for x in p)
colormap = [hold[str(g)] for g in group_list]
elif len(groups) > 9:
raise ValueError("Can only support up to 9 groups, "
"%s has %d groups" % color_by, len(groups))
else:
colormap = standard_palettes[palette]
# Build colormap
index = []
colors = []
for group_num, group in enumerate(group_list):
vals = groups[group]
index.extend(vals)
colors.extend([colormap[group_num]] * len(vals))
# build legend colors list
legend.append((str(group), colormap[group_num]))
data['colormap'] = pd.Series(colors, index=index)
# Build the actual graph page
source = ColumnDataSource(data=data)
callback = CustomJS(args=dict(source=source), code="""
var data = source.get('data');
var title = cb_obj.get('title');
var value = cb_obj.get('value');
if(title == "X-axis:") {
data['x'] = data[value];
} else {
data['y'] = data[value];
}
source.trigger('change');
""")
select_x = Select(title="X-axis:", value=data.columns[0],
options=list(data.columns), callback=callback)
select_y = Select(title="Y-axis:", value=data.columns[0],
options=list(data.columns), callback=callback)
output_file(output_fp, title=title)
p = figure(title=title)
p.xaxis.axis_label = 'X-axis'
p.yaxis.axis_label = 'Y-axis'
# Create graph itself, with colormap and color legend if needed
if color_by is not None:
p.circle(x=data[data.columns[0]], y=data[data.columns[0]], size=10,
source=source, legend=False, color=data['colormap'],
fill_alpha=0, line_width=2)
# Add legend spots
for title, color in legend[::-1]:
p.circle(x=[], y=[], size=10, color=color, legend=title,
fill_alpha=0, line_width=2)
p.legend.orientation = legend_pos
else:
p.circle(x=data[data.columns[0]], y=data[data.columns[0]], size=10,
source=source, legend=False, fill_alpha=0, line_width=2)
layout = vform(p, select_x, select_y)
show(layout)
if __name__ == "__main__":
build_bokeh()
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/tri/tripcolor.py | 8 | 5868 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.colors import Normalize
from matplotlib.tri.triangulation import Triangulation
import numpy as np
def tripcolor(ax, *args, **kwargs):
"""
Create a pseudocolor plot of an unstructured triangular grid.
The triangulation can be specified in one of two ways; either::
tripcolor(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tripcolor(x, y, ...)
tripcolor(x, y, triangles, ...)
tripcolor(x, y, triangles=triangles, ...)
tripcolor(x, y, mask=mask, ...)
tripcolor(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The next argument must be *C*, the array of color values, either
one per point in the triangulation if color values are defined at
points, or one per triangle in the triangulation if color values
are defined at triangles. If there are the same number of points
and triangles in the triangulation it is assumed that color
values are defined at points; to force the use of color values at
triangles use the kwarg *facecolors*=C instead of just *C*.
*shading* may be 'flat' (the default) or 'gouraud'. If *shading*
is 'flat' and C values are defined at points, the color values
used for each triangle are from the mean C of the triangle's
three points. If *shading* is 'gouraud' then color values must be
defined at points.
The remaining kwargs are the same as for
:meth:`~matplotlib.axes.Axes.pcolor`.
**Example:**
.. plot:: mpl_examples/pylab_examples/tripcolor_demo.py
"""
if not ax._hold:
ax.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
facecolors = kwargs.pop('facecolors', None)
if shading not in ['flat', 'gouraud']:
raise ValueError("shading must be one of ['flat', 'gouraud'] "
"not {0}".format(shading))
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
# C is the colors array defined at either points or faces (i.e. triangles).
# If facecolors is None, C are defined at points.
# If facecolors is not None, C are defined at faces.
if facecolors is not None:
C = facecolors
else:
C = np.asarray(args[0])
# If there are a different number of points and triangles in the
# triangulation, can omit facecolors kwarg as it is obvious from
# length of C whether it refers to points or faces.
# Do not do this for gouraud shading.
if (facecolors is None and len(C) == len(tri.triangles) and
len(C) != len(tri.x) and shading != 'gouraud'):
facecolors = C
# Check length of C is OK.
if ((facecolors is None and len(C) != len(tri.x)) or
(facecolors is not None and len(C) != len(tri.triangles))):
raise ValueError('Length of color values array must be the same '
'as either the number of triangulation points '
'or triangles')
# Handling of linewidths, shading, edgecolors and antialiased as
# in Axes.pcolor
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and ec.lower() == "none":
kwargs['antialiaseds'] = False
if shading == 'gouraud':
if facecolors is not None:
raise ValueError('Gouraud shading does not support the use '
'of facecolors kwarg')
if len(C) != len(tri.x):
raise ValueError('For gouraud shading, the length of color '
'values array must be the same as the '
'number of triangulation points')
collection = TriMesh(tri, **kwargs)
else:
# Vertices of triangles.
maskedTris = tri.get_masked_triangles()
verts = np.concatenate((tri.x[maskedTris][..., np.newaxis],
tri.y[maskedTris][..., np.newaxis]), axis=2)
# Color values.
if facecolors is None:
# One color per triangle, the mean of the 3 vertex color values.
C = C[maskedTris].mean(axis=1)
elif tri.mask is not None:
# Remove color values of masked triangles.
C = C.compress(1-tri.mask)
collection = PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None:
if not isinstance(norm, Normalize):
msg = "'norm' must be an instance of 'Normalize'"
raise ValueError(msg)
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
ax.grid(False)
minx = tri.x.min()
maxx = tri.x.max()
miny = tri.y.min()
maxy = tri.y.max()
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
ax.add_collection(collection)
return collection
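# Illustrative usage note (added comment; not part of matplotlib). From user
# code this function is normally reached through the Axes method, e.g.
#     ax.tripcolor(x, y, triangles, C)            # C defined per point
#     ax.tripcolor(triang, facecolors=C_tri)      # one value per triangle
# where the variable names are assumptions for the sketch.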
| mit |
BiaDarkia/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 1 | 10377 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, optional (default=None)
The generator used to randomly select a subset of samples. If int,
random_state is the seed used by the random number generator; If
RandomState instance, random_state is the random number generator; If
None, the random number generator is the RandomState instance used by
`np.random`. Used when ``sample_size is not None``.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
le = LabelEncoder()
labels = le.fit_transform(labels)
check_number_of_labels(len(le.classes_), X.shape[0])
distances = pairwise_distances(X, metric=metric, **kwds)
unique_labels = le.classes_
n_samples_per_label = np.bincount(labels, minlength=len(unique_labels))
# For sample i, store the mean distance of the cluster to which
# it belongs in intra_clust_dists[i]
intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype)
# For sample i, store the mean distance of the second closest
# cluster in inter_clust_dists[i]
inter_clust_dists = np.inf + intra_clust_dists
for curr_label in range(len(unique_labels)):
# Find inter_clust_dist for all samples belonging to the same
# label.
mask = labels == curr_label
current_distances = distances[mask]
# Leave out current sample.
n_samples_curr_lab = n_samples_per_label[curr_label] - 1
if n_samples_curr_lab != 0:
intra_clust_dists[mask] = np.sum(
current_distances[:, mask], axis=1) / n_samples_curr_lab
# Now iterate over all other labels, finding the mean
# cluster distance that is closest to every sample.
for other_label in range(len(unique_labels)):
if other_label != curr_label:
other_mask = labels == other_label
other_distances = np.mean(
current_distances[:, other_mask], axis=1)
inter_clust_dists[mask] = np.minimum(
inter_clust_dists[mask], other_distances)
sil_samples = inter_clust_dists - intra_clust_dists
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# score 0 for clusters of size 1, according to the paper
sil_samples[n_samples_per_label.take(labels) == 1] = 0
return sil_samples
def calinski_harabaz_score(X, labels):
"""Compute the Calinski and Harabaz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabaz_index>`.
Parameters
----------
X : array-like, shape (``n_samples``, ``n_features``)
List of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like, shape (``n_samples``,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabaz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
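# Illustrative usage sketch (added; not part of the scikit-learn module): a
# small self-contained check of the two metrics on synthetic data. The helper
# is never called at import time and its sizes/offsets are arbitrary.
def _demo_cluster_metrics(random_state=0):
    """Return (silhouette, calinski_harabaz) for two well-separated blobs."""
    rng = check_random_state(random_state)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5.0])  # two blobs
    labels = np.repeat([0, 1], 50)                             # matching labels
    return (silhouette_score(X, labels),
            calinski_harabaz_score(X, labels))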
| bsd-3-clause |
droud/courses | deeplearning1/nbs/utils.py | 8 | 7644 | from __future__ import division,print_function
import math, os, json, sys, re
import cPickle as pickle
from glob import glob
import numpy as np
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
import PIL
from PIL import Image
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
import scipy
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
import bcolz
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
from IPython.lib.display import FileLink
import theano
from theano import shared, tensor as T
from theano.tensor.nnet import conv2d, nnet
from theano.tensor.signal import pool
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, activity_l2, l1, activity_l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.utils.layer_utils import layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import *
from vgg16bn import *
np.set_printoptions(precision=4, linewidth=100)
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img):
return np.rollaxis(img,0,3).dot(to_bw)
def to_plot(img):
return np.rollaxis(img, 0, 3).astype(np.uint8)
def plot(img):
plt.imshow(to_plot(img))
def floor(x):
return int(math.floor(x))
def ceil(x):
return int(math.ceil(x))
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
if type(ims[0]) is np.ndarray:
ims = np.array(ims).astype(np.uint8)
if (ims.shape[-1] != 3):
ims = ims.transpose((0,2,3,1))
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
if titles is not None:
sp.set_title(titles[i], fontsize=18)
plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
clipped = np.clip(arr, (1-mx)/1, mx)
return clipped/clipped.sum(axis=1)[:, np.newaxis]
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
target_size=(224,224)):
return gen.flow_from_directory(dirname, target_size=target_size,
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def onehot(x):
return to_categorical(x)
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
res = Sequential(copy_layers(m.layers))
copy_weights(m.layers, res.layers)
return res
def insert_layer(model, new_layer, index):
res = Sequential()
for i,layer in enumerate(model.layers):
if i==index: res.add(new_layer)
copied = layer_from_config(wrap_config(layer))
res.add(copied)
copied.set_weights(layer.get_weights())
return res
def adjust_dropout(weights, prev_p, new_p):
scal = (1-prev_p)/(1-new_p)
return [o*scal for o in weights]
def get_data(path, target_size=(224,224)):
batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
return np.concatenate([batches.next() for i in range(batches.nb_sample)])
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
(This function is copied from the scikit docs.)
"""
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j], horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
def mk_size(img, r2c):
r,c,_ = img.shape
curr_r2c = r/c
new_r, new_c = r,c
if r2c>curr_r2c:
new_r = floor(c*r2c)
else:
new_c = floor(r/r2c)
arr = np.zeros((new_r, new_c, 3), dtype=np.float32)
r2=(new_r-r)//2
c2=(new_c-c)//2
arr[floor(r2):floor(r2)+r,floor(c2):floor(c2)+c] = img
return arr
def mk_square(img):
x,y,_ = img.shape
maxs = max(img.shape[:2])
y2=(maxs-y)//2
x2=(maxs-x)//2
arr = np.zeros((maxs,maxs,3), dtype=np.float32)
arr[floor(x2):floor(x2)+x,floor(y2):floor(y2)+y] = img
return arr
def vgg_ft(out_dim):
vgg = Vgg16()
vgg.ft(out_dim)
model = vgg.model
return model
def vgg_ft_bn(out_dim):
vgg = Vgg16BN()
vgg.ft(out_dim)
model = vgg.model
return model
def get_classes(path):
batches = get_batches(path+'train', shuffle=False, batch_size=1)
val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),
val_batches.filenames, batches.filenames, test_batches.filenames)
def split_at(model, layer_type):
layers = model.layers
layer_idx = [index for index,layer in enumerate(layers)
if type(layer) is layer_type][-1]
return layers[:layer_idx+1], layers[layer_idx+1:]
class MixIterator(object):
def __init__(self, iters):
self.iters = iters
self.multi = type(iters) is list
if self.multi:
self.N = sum([it[0].N for it in self.iters])
else:
self.N = sum([it.N for it in self.iters])
def reset(self):
for it in self.iters: it.reset()
def __iter__(self):
return self
def next(self, *args, **kwargs):
if self.multi:
nexts = [[next(it) for it in o] for o in self.iters]
            n0s = np.concatenate([n[0] for o in nexts for n in o])
            n1s = np.concatenate([n[1] for o in nexts for n in o])
            return (n0s, n1s)
else:
nexts = [next(it) for it in self.iters]
n0 = np.concatenate([n[0] for n in nexts])
n1 = np.concatenate([n[1] for n in nexts])
return (n0, n1)
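# Illustrative usage note (added comment; not in the original notebook utils):
# passing a tuple of Keras iterators interleaves their batches, e.g.
#     mi = MixIterator((trn_batches, val_batches))   # names are assumptions
#     batch_x, batch_y = next(mi)
# With a list of iterator groups instead, each call concatenates one batch
# from every iterator in every group.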
| apache-2.0 |
CforED/Machine-Learning | setup.py | 19 | 11460 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
from sklearn._build_utils import cythonize
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
cythonize.main(cwd)
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |