Gets the current packet size.
:return: the current packet size in bytes, based on the currently enabled sensors.
def getPacketSize(self):
"""
Gets the current packet size.
:return: the current packet size in bytes, based on the currently enabled sensors.
"""
size = 0
if self.isAccelerometerEnabled():
size += 6
if self.isGyroEnabled():
size += 6
if self.isTemperatureEnabled():
size += 2
return size
|
Performs initialisation of the device.
:return:
def initialiseDevice(self):
"""
Performs initialisation of the device.
:return:
"""
logger.debug("Initialising device")
self.getInterruptStatus()
self.setAccelerometerSensitivity(self._accelerationFactor * 32768.0)
self.setGyroSensitivity(self._gyroFactor * 32768.0)
self.setSampleRate(self.fs)
for loop in self.ZeroRegister:
self.i2c_io.write(self.MPU6050_ADDRESS, loop, 0)
# Sets clock source to gyro reference w/ PLL
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_1, 0b00000010)
# Controls frequency of wakeups in accel low power mode plus the sensor standby modes
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_PWR_MGMT_2, 0x00)
# Enables the data ready interrupt (DATA_RDY_EN)
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_INT_ENABLE, 0x01)
# enable the FIFO
self.enableFifo()
logger.debug("Initialised device")
|
Specifies that the device should write acceleration values to the FIFO; not applied until enableFifo is called.
:return:
def enableAccelerometer(self):
"""
Specifies that the device should write acceleration values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Enabling acceleration sensor")
self.fifoSensorMask |= self.enableAccelerometerMask
self._accelEnabled = True
self._setSampleSizeBytes()
|
Specifies that the device should NOT write acceleration values to the FIFO; not applied until enableFifo is called.
:return:
def disableAccelerometer(self):
"""
Specifies that the device should NOT write acceleration values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Disabling acceleration sensor")
self.fifoSensorMask &= ~self.enableAccelerometerMask
self._accelEnabled = False
self._setSampleSizeBytes()
|
Specifies that the device should write gyro values to the FIFO; not applied until enableFifo is called.
:return:
def enableGyro(self):
"""
Specifies that the device should write gyro values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Enabling gyro sensor")
self.fifoSensorMask |= self.enableGyroMask
self._gyroEnabled = True
self._setSampleSizeBytes()
|
Specifies that the device should NOT write gyro values to the FIFO; not applied until enableFifo is called.
:return:
def disableGyro(self):
"""
Specifies that the device should NOT write gyro values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Disabling gyro sensor")
self.fifoSensorMask &= ~self.enableGyroMask
self._gyroEnabled = False
self._setSampleSizeBytes()
|
Specifies that the device should write temperature values to the FIFO; not applied until enableFifo is called.
:return:
def enableTemperature(self):
"""
Specifies that the device should write temperature values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Enabling temperature sensor")
self.fifoSensorMask |= self.enableTemperatureMask
self._setSampleSizeBytes()
|
Specifies that the device should NOT write temperature values to the FIFO; not applied until enableFifo is called.
:return:
def disableTemperature(self):
"""
Specifies that the device should NOT write temperature values to the FIFO; not applied until enableFifo is called.
:return:
"""
logger.debug("Disabling temperature sensor")
self.fifoSensorMask &= ~self.enableTemperatureMask
self._setSampleSizeBytes()
|
Sets the gyro sensitivity to 250, 500, 1000 or 2000 according to the given value (and implicitly disables the
self tests)
:param value: the target sensitivity.
def setGyroSensitivity(self, value):
"""
Sets the gyro sensitivity to 250, 500, 1000 or 2000 according to the given value (and implicitly disables the
self tests)
:param value: the target sensitivity.
"""
try:
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_GYRO_CONFIG,
{250: 0, 500: 8, 1000: 16, 2000: 24}[value])
self._gyroFactor = value / 32768.0
self.gyroSensitivity = value
logger.debug("Set gyro sensitivity = %d", value)
except KeyError:
raise ArgumentError(str(value) + " is not a valid sensitivity (250,500,1000,2000)")
|
Sets the accelerometer sensitivity to 2, 4, 8 or 16 according to the given value. Throws an ArgumentError if
the value provided is not valid.
:param value: the target sensitivity.
def setAccelerometerSensitivity(self, value):
"""
Sets the accelerometer sensitivity to 2, 4, 8 or 16 according to the given value. Throws an ArgumentError if
the value provided is not valid.
:param value: the target sensitivity.
"""
# note that this implicitly disables the self tests on each axis
# i.e. the full byte is actually 000[accel]000 where the 1st 3 are the accelerometer self tests, the next two
# values are the actual sensitivity and the last 3 are unused
# the 2 [accel] bits are translated by the device as follows; 00 = 2g, 01 = 4g, 10 = 8g, 11 = 16g
# in binary we get 2 = 0, 4 = 1000, 8 = 10000, 16 = 11000
# so the 1st 3 bits are always 0
try:
self.i2c_io.write(self.MPU6050_ADDRESS,
self.MPU6050_RA_ACCEL_CONFIG,
{2: 0, 4: 8, 8: 16, 16: 24}[value])
self._accelerationFactor = value / 32768.0
self.accelerometerSensitivity = value
logger.debug("Set accelerometer sensitivity = %d", value)
except KeyError:
raise ArgumentError(str(value) + " is not a valid sensitivity (2,4,8,16)")
|
Sets the internal sample rate of the MPU-6050. This requires writing a value to the device to set the sample
rate as Gyroscope Output Rate / (1 + SMPLRT_DIV), where the gyroscope outputs at 8kHz and the peak sampling rate
is 1kHz. The target sample rate is therefore capped at 1kHz.
:param targetSampleRate: the target sample rate.
:return:
def setSampleRate(self, targetSampleRate):
"""
Sets the internal sample rate of the MPU-6050. This requires writing a value to the device to set the sample
rate as Gyroscope Output Rate / (1 + SMPLRT_DIV), where the gyroscope outputs at 8kHz and the peak sampling rate
is 1kHz. The target sample rate is therefore capped at 1kHz.
:param targetSampleRate: the target sample rate.
:return:
"""
sampleRateDenominator = int((8000 / min(targetSampleRate, 1000)) - 1)
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_SMPLRT_DIV, sampleRateDenominator)
self.fs = 8000.0 / (sampleRateDenominator + 1.0)
logger.debug("Set sample rate = %d", self.fs)
|
Resets the FIFO by first disabling the FIFO then sending a FIFO_RESET and then re-enabling the FIFO.
:return:
def resetFifo(self):
"""
Resets the FIFO by first disabling the FIFO then sending a FIFO_RESET and then re-enabling the FIFO.
:return:
"""
logger.debug("Resetting FIFO")
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b00000000)
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b00000100)
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_USER_CTRL, 0b01000000)
self.getInterruptStatus()
|
Enables the FIFO, resets it and then sets which values should be written to the FIFO.
:return:
def enableFifo(self):
"""
Enables the FIFO, resets it and then sets which values should be written to the FIFO.
:return:
"""
logger.debug("Enabling FIFO")
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_EN, 0)
self.resetFifo()
self.i2c_io.write(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_EN, self.fifoSensorMask)
logger.debug("Enabled FIFO")
|
gets the amount of data available on the FIFO right now.
:return: the number of bytes available on the FIFO which will be proportional to the number of samples available
based on the values the device is configured to sample.
def getFifoCount(self):
"""
gets the amount of data available on the FIFO right now.
:return: the number of bytes available on the FIFO which will be proportional to the number of samples available
based on the values the device is configured to sample.
"""
bytes = self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_COUNTH, 2)
count = (bytes[0] << 8) + bytes[1]
logger.debug("FIFO Count: %d", count)
return count
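Since the count is reported in bytes, the number of complete samples waiting follows by dividing by the packet size; a small sketch assuming an already-configured instance named mpu (hypothetical name):
available_bytes = mpu.getFifoCount()             # e.g. 70 bytes buffered
packet_size = mpu.getPacketSize()                # 14 bytes with accel (6) + temp (2) + gyro (6) enabled
whole_samples = available_bytes // packet_size   # 5 complete samples can be read without truncation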
|
Reads the specified number of bytes from the FIFO; should be called after a call to getFifoCount to ensure there
is new data available (to avoid reading duplicate data).
:param bytesToRead: the number of bytes to read.
:return: the bytes read.
def getDataFromFIFO(self, bytesToRead):
"""
Reads the specified number of bytes from the FIFO; should be called after a call to getFifoCount to ensure there
is new data available (to avoid reading duplicate data).
:param bytesToRead: the number of bytes to read.
:return: the bytes read.
"""
return self.i2c_io.readBlock(self.MPU6050_ADDRESS, self.MPU6050_RA_FIFO_R_W, bytesToRead)
|
reads a batchSize batch of data from the FIFO while attempting to optimise the number of times we have to read
from the device itself.
:return: a list of data where each item is a single sample of data converted into real values and stored as a
dict.
def provideData(self):
"""
reads a batchSize batch of data from the FIFO while attempting to optimise the number of times we have to read
from the device itself.
:return: a list of data where each item is a single sample of data converted into real values and stored as a
dict.
"""
samples = []
fifoBytesAvailable = 0
fifoWasReset = False
logger.debug(">> provideData target %d samples", self.samplesPerBatch)
iterations = 0
# allow 1.5x the expected duration of the batch
breakTime = time() + ((self.samplesPerBatch / self.fs) * 1.5)
overdue = False
while len(samples) < self.samplesPerBatch and not overdue:
iterations += 1
if iterations > self.samplesPerBatch and iterations % 100 == 0:
if time() > breakTime:
logger.warning("Breaking measurement after %d iterations, batch overdue", iterations)
overdue = True
if fifoBytesAvailable < self.sampleSizeBytes or fifoWasReset:
interrupt = self.getInterruptStatus()
fifoBytesAvailable = self.getFifoCount()
fifoWasReset = False
logger.debug("Start sample loop [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes)
if interrupt & 0x10:
logger.error("FIFO OVERFLOW, RESETTING [available: %d , interrupt: %d]", fifoBytesAvailable, interrupt)
self.measurementOverflowed = True
self.resetFifo()
fifoWasReset = True
elif fifoBytesAvailable == 1024:
logger.error("FIFO FULL, RESETTING [available: %d , interrupt: %d]", fifoBytesAvailable, interrupt)
self.measurementOverflowed = True
self.resetFifo()
fifoWasReset = True
elif interrupt & 0x02 or interrupt & 0x01:
# wait for at least 1 sample to arrive, should be a VERY short wait
while fifoBytesAvailable < self.sampleSizeBytes:
logger.debug("Waiting for sample [available: %d , required: %d]", fifoBytesAvailable,
self.sampleSizeBytes)
fifoBytesAvailable = self.getFifoCount()
logger.debug("Processing data [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes)
fifoReadBytes = self.sampleSizeBytes
# TODO this chunk of code is a bit messy, tidy it up
# if we have more than 1 sample available then ensure we read as many as we can at once (albeit within
# the limits of the max i2c read size of 32 bytes)
if fifoBytesAvailable > self.sampleSizeBytes:
fifoReadBytes = min(fifoBytesAvailable // self.sampleSizeBytes,
self.maxBytesPerFifoRead) * self.sampleSizeBytes
logger.debug("Excess bytes to read [available: %d , reading: %d]", fifoBytesAvailable,
fifoReadBytes)
# but don't read more than we need to fulfil the batch
samplesToRead = fifoReadBytes // self.sampleSizeBytes
excessSamples = self.samplesPerBatch - len(samples) - samplesToRead
if excessSamples < 0:
samplesToRead += excessSamples
fifoReadBytes = int(samplesToRead * self.sampleSizeBytes)
logger.debug("Excess samples to read [available: %d , reading: %d]", fifoBytesAvailable,
fifoReadBytes)
else:
logger.debug("Reading [available: %d , reading: %d]", fifoBytesAvailable, fifoReadBytes)
# read the bytes from the fifo, break it into sample sized chunks and convert to the actual values
fifoBytes = self.getDataFromFIFO(fifoReadBytes)
samples.extend([self.unpackSample(fifoBytes[i:i + self.sampleSizeBytes])
for i in range(0, len(fifoBytes), self.sampleSizeBytes)])
# track the count here so we can avoid going back to the FIFO each time
fifoBytesAvailable -= fifoReadBytes
logger.debug("End sample loop [available: %d , required: %d]", fifoBytesAvailable, self.sampleSizeBytes)
logger.debug("<< provideData %d samples", len(samples))
return samples
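A minimal acquisition sketch, assuming an already-constructed instance named mpu (the constructor and i2c_io wiring sit outside this excerpt) and a placeholder handle() callback for application code:
mpu.enableAccelerometer()
mpu.enableGyro()
mpu.initialiseDevice()
while True:
    batch = mpu.provideData()   # up to samplesPerBatch converted samples
    for sample in batch:
        handle(sample)          # application-specific processing (placeholder)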
|
unpacks a single sample of data (where sample length is based on the currently enabled sensors).
:param rawData: the data to convert
:return: a converted data set.
def unpackSample(self, rawData):
"""
unpacks a single sample of data (where sample length is based on the currently enabled sensors).
:param rawData: the data to convert
:return: a converted data set.
"""
length = len(rawData)
# TODO error if not multiple of 2
# logger.debug(">> unpacking sample %d length %d", self._sampleIdx, length)
unpacked = struct.unpack(">" + ('h' * (length // 2)), memoryview(bytearray(rawData)).tobytes())
# store the data in a dictionary
mpu6050 = collections.OrderedDict()
mpu6050[SAMPLE_TIME] = self._sampleIdx / self.fs
sensorIdx = 0
if self.isAccelerometerEnabled():
mpu6050[ACCEL_X] = unpacked[sensorIdx] * self._accelerationFactor
sensorIdx += 1
mpu6050[ACCEL_Y] = unpacked[sensorIdx] * self._accelerationFactor
sensorIdx += 1
mpu6050[ACCEL_Z] = unpacked[sensorIdx] * self._accelerationFactor
sensorIdx += 1
if self.isTemperatureEnabled():
mpu6050[TEMP] = unpacked[sensorIdx] * self._temperatureGain + self._temperatureOffset
sensorIdx += 1
if self.isGyroEnabled():
mpu6050[GYRO_X] = unpacked[sensorIdx] * self._gyroFactor
sensorIdx += 1
mpu6050[GYRO_Y] = unpacked[sensorIdx] * self._gyroFactor
sensorIdx += 1
mpu6050[GYRO_Z] = unpacked[sensorIdx] * self._gyroFactor
sensorIdx += 1
# TODO should we send as a dict so the keys are available?
output = list(mpu6050.values())
self._sampleIdx += 1
# logger.debug("<< unpacked sample length %d into vals size %d", length, len(output))
return output
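Each '>h' field is a signed big-endian 16-bit integer, so with all three sensors enabled a 14-byte packet unpacks into accel x/y/z, temperature, gyro x/y/z in that order. A standalone illustration with made-up bytes:
import struct

raw = bytes([0x10, 0x00, 0x00, 0x00, 0x00, 0x00,   # accel x/y/z (hypothetical raw counts)
             0x0B, 0xB8,                            # temperature
             0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF])   # gyro x/y/z
values = struct.unpack(">hhhhhhh", raw)             # (4096, 0, 0, 3000, 0, 0, -1)
# each raw count is then scaled: accel * _accelerationFactor, temp * _temperatureGain + _temperatureOffset,
# gyro * _gyroFactor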
|
Like :func:`textwrap.wrap` but preserves existing newlines which
:func:`textwrap.wrap` does not otherwise handle well.
See Also
--------
:func:`textwrap.wrap`
def wrap(text, width, *args, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing newlines which
:func:`textwrap.wrap` does not otherwise handle well.
See Also
--------
:func:`textwrap.wrap`
"""
return sum([textwrap.wrap(line, width, *args, **kwargs)
if line else [''] for line in text.splitlines()], [])
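An illustrative call (not from the original source) showing that blank lines survive, where plain textwrap.wrap would drop them:
>>> wrap("hello world\n\nbye", 8)
['hello', 'world', '', 'bye']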
|
Outputs line-wrapped text wrapped in a box drawn with a repeated (usually
ASCII) character.
For example:
>>> print(textbox('Text to wrap', width=16))
################
# #
# Text to wrap #
# #
################
Parameters
----------
text : string
The text to wrap
width : int
The width of the entire box, including the perimeter and
the indentation space. Because the
wrapped text is padded with an additional column of whitespace on each
side, the minimum width is 5--any width less than that is
automatically increased to 5 (default: 78)
boxchar : string
(No pun intended.) The character to draw the box with. May also
be a string of multiple characters (default: '#')
indent : int
Amount of space by which the box should be indented. (default: 0)
def textbox(text, width=78, boxchar='#', indent=0):
"""
Outputs line-wrapped text wrapped in a box drawn with a repeated (usually
ASCII) character.
For example:
>>> print(textbox('Text to wrap', width=16))
################
# #
# Text to wrap #
# #
################
Parameters
----------
text : string
The text to wrap
width : int
The width of the entire box, including the perimeter and
the indentation space. Because the
wrapped text is padded with an additional column of whitespace on each
side, the minimum width is 5--any width less than that is
automatically increased to 5 (default: 78)
boxchar : string
(No pun intended.) The character to draw the box with. May also
be a string of multiple characters (default: '#')
indent : int
Amount of space by which the box should be indented. (default: 0)
"""
min_width = len(boxchar) * 2 + 3
width = max(width-indent, min_width)
indentspace = indent * ' '
wrap_width = width - min_width + 1
q, r = divmod(width, len(boxchar))
# The top/bottom border
top_border = indentspace + boxchar * q + boxchar[:r]
top_padding = indentspace + boxchar + ' ' * (width - len(boxchar) * 2) + boxchar
lines = ['%s%s %s %s' % (indentspace, boxchar, line.ljust(wrap_width),
boxchar)
for line in wrap(text, wrap_width)]
top = [top_border, top_padding]
bottom = [top_padding, top_border]
return '\n'.join(top + lines + bottom)
|
Entrypoint function.
def main():
"""Entrypoint function."""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username',
help='Hydro Quebec username')
parser.add_argument('-p', '--password',
help='Password')
parser.add_argument('-j', '--json', action='store_true',
default=False, help='Json output')
parser.add_argument('-i', '--influxdb', action='store_true',
default=False, help='InfluxDb output')
parser.add_argument('-c', '--contract',
default=None, help='Contract number')
parser.add_argument('-l', '--list-contracts', action='store_true',
default=False, help='List all your contracts')
parser.add_argument('-H', '--hourly', action='store_true',
default=False, help='Show yesterday hourly consumption')
parser.add_argument('-t', '--timeout',
default=REQUESTS_TIMEOUT, help='Request timeout')
parser.add_argument('-V', '--version', action='store_true',
default=False, help='Show version')
raw_group = parser.add_argument_group('Detailled-energy raw download option')
raw_group.add_argument('--detailled-energy', action='store_true',
default=False, help='Get raw json output download')
raw_group.add_argument('--start-date',
default=(datetime.datetime.now(HQ_TIMEZONE) -
datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
help='Start date for detailled-output')
raw_group.add_argument('--end-date',
default=datetime.datetime.now(HQ_TIMEZONE).strftime("%Y-%m-%d"),
help="End date for detailled-output")
args = parser.parse_args()
if args.version:
print(VERSION)
return 0
if not args.username or not args.password:
parser.print_usage()
print("pyhydroquebec: error: the following arguments are required: "
"-u/--username, -p/--password")
return 3
client = HydroQuebecClient(args.username, args.password, args.timeout)
loop = asyncio.get_event_loop()
if args.detailled_energy is False:
async_func = client.fetch_data()
else:
start_date = datetime.datetime.strptime(args.start_date, '%Y-%m-%d')
end_date = datetime.datetime.strptime(args.end_date, '%Y-%m-%d')
async_func = client.fetch_data_detailled_energy_use(start_date,
end_date)
try:
fut = asyncio.wait([async_func])
loop.run_until_complete(fut)
except BaseException as exp:
print(exp)
return 1
finally:
close_fut = asyncio.wait([client.close_session()])
loop.run_until_complete(close_fut)
if not client.get_data():
return 2
if args.list_contracts:
print("Contracts: {}".format(", ".join(client.get_contracts())))
elif args.influxdb:
output_influx(client.get_data(args.contract))
elif args.json or args.detailled_energy:
output_json(client.get_data(args.contract))
else:
output_text(args.username, client.get_data(args.contract), args.hourly)
return 0
|
Calculate western easter
def easter(year):
'''Calculate western easter'''
# formula taken from http://aa.usno.navy.mil/faq/docs/easter.html
c = trunc(year / 100)
n = year - 19 * trunc(year / 19)
k = trunc((c - 17) / 25)
i = c - trunc(c / 4) - trunc((c - k) / 3) + (19 * n) + 15
i = i - 30 * trunc(i / 30)
i = i - trunc(i / 28) * (1 - trunc(i / 28) * trunc(29 / (i + 1)) * trunc((21 - n) / 11))
j = year + trunc(year / 4) + i + 2 - c + trunc(c / 4)
j = j - 7 * trunc(j / 7)
l = i - j
month = 3 + trunc((l + 40) / 44)
day = l + 28 - 31 * trunc(month / 4)
return year, int(month), int(day)
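As a sanity check (added here, not part of the original source): Western Easter 2024 fell on 31 March, and the formula reproduces it.
>>> easter(2024)
(2024, 3, 31)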
|
July 4th
def independence_day(year, observed=None):
'''July 4th'''
day = 4
if observed:
if calendar.weekday(year, JUL, 4) == SAT:
day = 3
if calendar.weekday(year, JUL, 4) == SUN:
day = 5
return (year, JUL, day)
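For example, 4 July 2026 falls on a Saturday, so the observed holiday shifts to Friday the 3rd (assuming the module constant JUL is 7):
>>> independence_day(2026)
(2026, 7, 4)
>>> independence_day(2026, observed=True)
(2026, 7, 3)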
|
in USA: 2nd Monday in Oct
Elsewhere: Oct 12
def columbus_day(year, country='usa'):
'''in USA: 2nd Monday in Oct
Elsewhere: Oct 12'''
if country == 'usa':
return nth_day_of_month(2, MON, OCT, year)
else:
return (year, OCT, 12)
|
USA: last Thurs. of November, Canada: 2nd Mon. of October
def thanksgiving(year, country='usa'):
'''USA: last Thurs. of November, Canada: 2nd Mon. of October'''
if country == 'usa':
if year in [1940, 1941]:
return nth_day_of_month(3, THU, NOV, year)
elif year == 1939:
return nth_day_of_month(4, THU, NOV, year)
else:
return nth_day_of_month(0, THU, NOV, year)
if country == 'canada':
return nth_day_of_month(2, MON, OCT, year)
|
Parameters
----------
y: 1D numpy array
The data to be fitted
x: 1D numpy array
The x values of the y array. x and y must
have the same shape.
weights: 1D numpy array, must have the same shape as x and y
weight values
Examples
--------
>>> import numpy as N
>>> from numpy.core import around
>>> x = N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5])
>>> y = N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18])
>>> around(linefit(x,y), decimals=5)
array([9.27273, 1.43636])
>>> x = N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7])
>>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9])
>>> around(linefit(x,y), decimals=5)
array([1.42564, 0.31579])
def linefit(x, y, weights=None):
"""
Parameters
----------
y: 1D numpy array
The data to be fitted
x: 1D numpy array
The x values of the y array. x and y must
have the same shape.
weights: 1D numpy array, must have the same shape as x and y
weight values
Examples
--------
>>> import numpy as N
>>> from numpy.core import around
>>> x = N.array([-5, -4 ,-3 ,-2 ,-1, 0, 1, 2, 3, 4, 5])
>>> y = N.array([1, 5, 4, 7, 10, 8, 9, 13, 14, 13, 18])
>>> around(linefit(x,y), decimals=5)
array([9.27273, 1.43636])
>>> x = N.array([1.3,1.3,2.0,2.0,2.7,3.3,3.3,3.7,3.7,4.,4.,4.,4.7,4.7,5.,5.3,5.3,5.3,5.7,6.,6.,6.3,6.7])
>>> y = N.array([2.3,1.8,2.8,1.5,2.2,3.8,1.8,3.7,1.7,2.8,2.8,2.2,3.2,1.9,1.8,3.5,2.8,2.1,3.4,3.2,3.,3.,5.9])
>>> around(linefit(x,y), decimals=5)
array([1.42564, 0.31579])
"""
if len(x) != len(y):
print("Error: X and Y must have equal size\n")
return
n = len(x)
w = N.zeros((n,n)).astype(N.float)
if weights is None:
for i in N.arange(n):
w[i,i] = 1
else:
if len(weights) != n:
print("Error: Weights must have the same size as X and Y.\n")
return
for i in N.arange(n):
w[i,i] = weights[i]
x = x.astype(N.float)
y = y.astype(N.float)
# take the weighted avg for calculating the covariance
Xavg = N.sum(N.dot(w,x)) / N.sum(w.diagonal())
Yavg = N.sum(N.dot(w,y)) / N.sum(w.diagonal())
xm = x - Xavg
xmt = N.transpose(xm)
ym = y - Yavg
b1 = N.dot(xmt,N.dot(w,ym)) / N.dot(xmt ,N.dot(w,xm))
b0 = Yavg - b1 * Xavg
return b0, b1
|
Analyses the measurement with the given parameters
:param measurementId:
:return:
def get(self, measurementId):
"""
Analyses the measurement with the given parameters
:param measurementId:
:return:
"""
logger.info('Analysing ' + measurementId)
measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
if measurement is not None:
if measurement.inflate():
data = {
name: {
'spectrum': {
'x': self._jsonify(data.spectrum('x')),
'y': self._jsonify(data.spectrum('y')),
'z': self._jsonify(data.spectrum('z')),
'sum': self._jsonify(data.spectrum('sum'))
},
'psd': {
'x': self._jsonify(data.psd('x')),
'y': self._jsonify(data.psd('y')),
'z': self._jsonify(data.psd('z'))
},
'peakSpectrum': {
'x': self._jsonify(data.peakSpectrum('x')),
'y': self._jsonify(data.peakSpectrum('y')),
'z': self._jsonify(data.peakSpectrum('z')),
'sum': self._jsonify(data.peakSpectrum('sum'))
}
}
for name, data in measurement.data.items()
}
return data, 200
else:
return None, 404
else:
return None, 404
|
compares the current device state against the targetStateProvider and issues updates as necessary to ensure the
device is at that state.
:param md:
:param targetState: the target state.
:param httpclient: the http client
:return:
def _applyTargetState(targetState, md, httpclient):
"""
compares the current device state against the targetStateProvider and issues updates as necessary to ensure the
device is at that state.
:param md:
:param targetState: the target state.
:param httpclient: the http client
:return:
"""
anyUpdate = False
if md['fs'] != targetState.fs:
logger.info("Updating fs from " + str(md['fs']) + " to " + str(targetState.fs) + " for " + md['name'])
anyUpdate = True
if md['samplesPerBatch'] != targetState.samplesPerBatch:
logger.info("Updating samplesPerBatch from " + str(md['samplesPerBatch']) + " to " + str(
targetState.samplesPerBatch) + " for " + md['name'])
anyUpdate = True
if md['gyroEnabled'] != targetState.gyroEnabled:
logger.info("Updating gyroEnabled from " + str(md['gyroEnabled']) + " to " + str(
targetState.gyroEnabled) + " for " + md['name'])
anyUpdate = True
if md['gyroSens'] != targetState.gyroSens:
logger.info(
"Updating gyroSens from " + str(md['gyroSens']) + " to " + str(targetState.gyroSens) + " for " + md[
'name'])
anyUpdate = True
if md['accelerometerEnabled'] != targetState.accelerometerEnabled:
logger.info("Updating accelerometerEnabled from " + str(md['accelerometerEnabled']) + " to " + str(
targetState.accelerometerEnabled) + " for " + md['name'])
anyUpdate = True
if md['accelerometerSens'] != targetState.accelerometerSens:
logger.info("Updating accelerometerSens from " + str(md['accelerometerSens']) + " to " + str(
targetState.accelerometerSens) + " for " + md['name'])
anyUpdate = True
if anyUpdate:
payload = marshal(targetState, targetStateFields)
logger.info("Applying target state change " + md['name'] + " - " + str(payload))
if RecordingDeviceStatus.INITIALISED.name == md.get('status'):
try:
httpclient.patch(md['serviceURL'], json=payload)
except Exception as e:
logger.exception(e)
else:
logger.warning("Ignoring target state change until " + md['name'] + " is idle, currently " + md['status'])
else:
logger.debug("Device " + md['name'] + " is at target state, we continue")
|
Updates the specified device to the current system target state.
:param device: the device to update.
:return:
def updateDeviceState(self, device):
"""
Updates the specified device to the current system target state.
:param device: the device to update.
:return:
"""
# this is only threadsafe because the targetstate is effectively immutable, if it becomes mutable in future then
# funkiness may result
self._reactor.offer(REACH_TARGET_STATE, [self._targetStateProvider.state, device, self._httpclient])
|
Updates the system target state and propagates that to all devices.
:param newState:
:return:
def updateTargetState(self, newState):
"""
Updates the system target state and propagates that to all devices.
:param newState:
:return:
"""
self._targetStateProvider.state = loadTargetState(newState, self._targetStateProvider.state)
for device in self.deviceController.getDevices():
self.updateDeviceState(device.payload)
|
Input ASCII trailer file "input" will be read.
The contents will then be written out to a FITS file in the same format
as used by 'stwfits' from IRAF.
Parameters
===========
input : str
Filename of input ASCII trailer file
width : int
Number of characters wide to use for defining output FITS column
[Default: 132]
output : str
Filename to use for writing out converted FITS trailer file
If None, input filename will be converted from *.tra -> *_trl.fits
[Default: None]
keep : bool
Specifies whether or not to keep any previously written FITS files
[Default: False]
def convert(input, width=132, output=None, keep=False):
"""Input ASCII trailer file "input" will be read.
The contents will then be written out to a FITS file in the same format
as used by 'stwfits' from IRAF.
Parameters
===========
input : str
Filename of input ASCII trailer file
width : int
Number of characters wide to use for defining output FITS column
[Default: 132]
output : str
Filename to use for writing out converted FITS trailer file
If None, input filename will be converted from *.tra -> *_trl.fits
[Default: None]
keep : bool
Specifies whether or not to keep any previously written FITS files
[Default: False]
"""
# open input trailer file
trl = open(input)
# process all lines
lines = np.array([i for text in trl.readlines() for i in textwrap.wrap(text,width=width)])
# close ASCII trailer file now that we have processed all the lines
trl.close()
if output is None:
# create fits file
rootname,suffix = os.path.splitext(input)
s = suffix[1:].replace('ra','rl')
fitsname = "{}_{}{}fits".format(rootname,s,os.path.extsep)
else:
fitsname = output
full_name = os.path.abspath(os.path.join(os.path.curdir,fitsname))
old_file = os.path.exists(full_name)
if old_file:
if keep:
print("ERROR: Trailer file already written out as: {}".format(full_name))
raise IOError
else:
os.remove(full_name)
# Build FITS table and write it out
line_fmt = "{}A".format(width)
tbhdu = fits.BinTableHDU.from_columns([fits.Column(name='TEXT_FILE',format=line_fmt,array=lines)])
tbhdu.writeto(fitsname)
print("Created output FITS filename for trailer:{} {}".format(os.linesep,full_name))
os.remove(input)
|
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return results
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return results
for (key, val) in res.items():
if val == True:
continue
if isinstance(cfg.get(key), dict):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return results
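A typical use, sketched here against the public ConfigObj/validate API (the config and spec contents are illustrative):
from configobj import ConfigObj, flatten_errors
from validate import Validator

cfg = ConfigObj(['port = not-a-number'],
                configspec=['port = integer(0, 65535)'])
result = cfg.validate(Validator(), preserve_errors=True)
for sections, key, error in flatten_errors(cfg, result):
    # sections is the (possibly empty) section path, key is the failing option,
    # error is the exception describing the failure (or False if the value was missing)
    print(sections, key, error)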
|
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
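A short sketch of how the return value reads in practice (contents are illustrative):
from configobj import ConfigObj, get_extra_values
from validate import Validator

cfg = ConfigObj(['port = 8080', 'debug = yes'],
                configspec=['port = integer(0, 65535)'])
cfg.validate(Validator())
print(get_extra_values(cfg))   # [((), 'debug')] -- 'debug' is not covered by the configspec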
|
Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
|
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
def pop(self, key, default=MISSING):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
|
Pops the first (key,val)
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
|
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
|
D.items() -> list of D's (key, value) pairs, as 2-tuples
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
|
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict() # doctest: +SKIP
>>> n == a # doctest: +SKIP
1
>>> n is a # doctest: +SKIP
0
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict() # doctest: +SKIP
>>> n == a # doctest: +SKIP
1
>>> n is a # doctest: +SKIP
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
|
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
|
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
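An illustrative use (key names chosen for the example):
>>> cfg = ConfigObj(['old_key = 1'])
>>> cfg.rename('old_key', 'new_key')
>>> cfg
ConfigObj({'new_key': '1'})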
|
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
|
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, string_types):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
|
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
|
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
|
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
|
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(BOM, str) or not line.startswith(BOM):
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF8 - don't decode
if isinstance(infile, string_types):
return infile.splitlines(True)
else:
return infile
# UTF16 - have to decode
return self._decode(infile, encoding)
# No BOM discovered and no encoding specified, just return
if isinstance(infile, string_types):
# infile read from a file will be a single string
return infile.splitlines(True)
return infile
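Independent of this method, the detection it performs amounts to checking the leading bytes against the known byte-order marks; a standalone sketch using only the codecs constants (not the class internals):
import codecs

def sniff_bom(raw):
    # return (detected encoding or None, payload with the BOM stripped)
    for bom, encoding in ((codecs.BOM_UTF8, 'utf_8'),
                          (codecs.BOM_UTF16_LE, 'utf_16_le'),
                          (codecs.BOM_UTF16_BE, 'utf_16_be')):
        if raw.startswith(bom):
            return encoding, raw[len(bom):]
    return None, raw

print(sniff_bom(codecs.BOM_UTF8 + b'key = value'))   # ('utf_8', b'key = value')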
|
Decode infile to unicode. Using the specified encoding.
If it is a string, it also needs converting to a list.
def _decode(self, infile, encoding):
"""
Decode infile to unicode. Using the specified encoding.
If it is a string, it also needs converting to a list.
"""
if isinstance(infile, string_types):
# can't be unicode
# NOTE: Could raise a ``UnicodeDecodeError``
return infile.decode(encoding).splitlines(True)
for i, line in enumerate(infile):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
if PY3K:
if not isinstance(line, str):
infile[i] = line.decode(encoding)
else:
if not isinstance(line, unicode):
infile[i] = line.decode(encoding)
return infile
|
Decode element to unicode if necessary.
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if not self.encoding:
return line
if isinstance(line, str) and self.default_encoding:
return line.decode(self.default_encoding)
return line
|
Actually parse the config file.
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth at line %s.",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level at line %s.",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
# the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested at line %s.",
NestingError, infile, cur_index)
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name at line %s.',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
# it neither matched as a keyword
# or a section marker
self._handle_error(
'Invalid line at line "%s".',
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value at line %s.'
else:
msg = 'Parse error in value at line %s.'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value at line %s.',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name at line %s.',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
|
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
def _match_depth(self, sect, depth):
"""
Given a section and a depth level, walk back through the sections
parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
|
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occurred at ``cur_index``
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
The error will have occurred at ``cur_index``
"""
line = infile[cur_index]
cur_index += 1
message = text % cur_index
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
|
Return an unquoted version of a value
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
|
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, string_types):
if self.stringify:
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
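A short sketch of the quoting rules as seen through the public write path, assuming configobj is importable; the exact quote characters chosen may vary, so the comments only describe the general behaviour.
from configobj import ConfigObj

cfg = ConfigObj()
cfg['plain'] = 'simple'
cfg['spaced'] = ' padded value '
cfg['listy'] = ['a', 'b b']
for line in cfg.write():    # write() returns the rendered lines
    print(line)
# 'simple' stays unquoted, while the padded value and 'b b' come back quoted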
|
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
def _handle_value(self, value):
"""
Given a value string, unquote, remove comment,
handle lists. (including empty and single member lists)
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
# NOTE: there is no error handling from here if the regex
# is wrong: then incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
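A small demonstration of the list handling described above, assuming configobj is importable; it covers the empty-list comma, the single-member trailing comma, and inline comments.
from configobj import ConfigObj

cfg = ConfigObj(['empty = ,',
                 'single = value,',
                 'many = a, b, c   # inline comment'])
print(cfg['empty'])                  # []
print(cfg['single'])                 # ['value']
print(cfg['many'])                   # ['a', 'b', 'c']
print(cfg.inline_comments['many'])   # the '# inline comment' part is preserved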
|
Extract the value, where we are in a multiline situation.
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
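A minimal sketch of a multiline value being consumed, assuming configobj is importable; the two physical lines are joined with a newline and the trailing comment is kept separately.
from configobj import ConfigObj

lines = ['key = """first line',
         'second line"""   # trailing comment']
cfg = ConfigObj(lines)
print(cfg['key'])                  # 'first line\nsecond line'
print(cfg.inline_comments['key'])  # the trailing comment survives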
|
Parse the configspec.
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
|
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
|
Write an individual line, for the write method
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
# NOTE: the calls to self._quote here handle non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
|
Write a section marker line
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
|
Deal with a comment.
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
|
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename # doctest: +SKIP
>>> a.filename = 'test.ini' # doctest: +SKIP
>>> a.write() # doctest: +SKIP
>>> a.filename = filename # doctest: +SKIP
>>> a == ConfigObj('test.ini', raise_errors=True) # doctest: +SKIP
1
>>> import os # doctest: +SKIP
>>> os.remove('test.ini') # doctest: +SKIP
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename # doctest: +SKIP
>>> a.filename = 'test.ini' # doctest: +SKIP
>>> a.write() # doctest: +SKIP
>>> a.filename = filename # doctest: +SKIP
>>> a == ConfigObj('test.ini', raise_errors=True) # doctest: +SKIP
1
>>> import os # doctest: +SKIP
>>> os.remove('test.ini') # doctest: +SKIP
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, dict):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if self.encoding:
output = output.encode(self.encoding)
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output = BOM_UTF8 + output
if not output.endswith(newline):
output += newline
if outfile is not None:
outfile.write(output)
else:
# !!! write mode was 'wb' but that fails in PY3K and we don't need it
h = open(self.filename, 'w')
h.write(output)
h.close()
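A brief usage sketch for write(), assuming configobj is importable; with no filename or outfile it returns the rendered lines, and 'example.ini' below is a hypothetical path used only for illustration.
from configobj import ConfigObj

cfg = ConfigObj()
cfg.initial_comment = ['generated example']
cfg['server'] = {'host': 'localhost', 'port': '8080'}
print(cfg.write())             # no filename/outfile -> a list of rendered lines
cfg.filename = 'example.ini'   # hypothetical output path
cfg.write()                    # writes the same content to the file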
|
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from .validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (not entry in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
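A minimal end-to-end validation sketch, assuming the companion validate module is importable as a top-level ``validate``; the configspec is given as a list of lines and copy=True pulls defaults into the config.
from configobj import ConfigObj
from validate import Validator

spec = ['port = integer(1, 65535, default=8080)',
        'debug = boolean(default=False)']
cfg = ConfigObj(['port = 1234'], configspec=spec)
result = cfg.validate(Validator(), copy=True)
print(result)        # True when every check passes
print(cfg['port'])   # 1234, converted to int because stringify is on
print(cfg['debug'])  # False, filled in from the default thanks to copy=True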
|
Clear ConfigObj instance and restore to 'freshly created' state.
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
|
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
|
A dummy check method, always returns the value unchanged.
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
|
Get the command line arguments
Parameters: NONE
Returns:
files list of file specifications to be converted
outputFileNames list of output file specifications
(one per input file)
Default: a list of None values (one per input file)
conversionFormat string indicating the conversion format requested
Default: "multiExtension"
verbose flag indicating if verbose output is desired
Default: False
Exceptions: NONE
def _processCommandLineArgs():
"""
Get the command line arguments
Parameters: NONE
Returns:
files list of file specifications to be converted
outputFileNames list of output file specifications
(one per input file)
Default: a list of None values (one per input file)
conversionFormat string indicating the conversion format requested
Default: "multiExtension"
verbose flag indicating if verbose output is desired
Default: False
Exceptions: NONE
"""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "hvmo:",
["help",
"verbose",
"multiExtensionConversion",
"outputFileName"])
except getopt.GetoptError as e:
print(str(e))
_usage()
sys.exit(1)
conversionFormat = ""
outputFileNames = []
verbose = False
for o, a in opts:
if o in ("-h", "--help"):
_usage()
print(" Convert the waivered FITS Files (FILEs) to various formats.")
print(" The default conversion format is multi-extension FITS.")
print(" Options:")
print(" -h, --help display this help message and exit")
print(" -v, --verbose provide verbose output")
print(" -m, --multiExtensionConversion convert to multiExtension FITS format")
print(" -o, --outputFileName comma separated list of output file")
print(" specifications (one per input FILE)")
sys.exit()
if o in ("-v", "--verbose"):
verbose = True
if o in ("-m", "--multiExtensionConversion"):
if conversionFormat != "":
print("convertwaiveredfits.py: only one conversion format allowed")
_usage()
sys.exit(1)
conversionFormat = "multiExtension"
if o in ("-o", "--outputFileName"):
outputFileNames = a.split(',')
if conversionFormat == "":
#
# Set the default conversion format if none was provided
#
conversionFormat = "multiExtension"
if not args:
print("convertwaiveredfits.py: nothing to convert")
_usage()
sys.exit(1)
else:
files = args
if outputFileNames:
if len(files) != len(outputFileNames):
print("convertwaiveredfits.py: number of output file names does not match")
print(" the number of FILEs to convert")
_usage()
sys.exit(1)
else:
for i in range(0,len(files)):
outputFileNames.append(None)
return files,outputFileNames,conversionFormat,verbose
|
Verify that the input HDUList is for a waivered FITS file.
Parameters:
waiveredHdul HDUList object to be verified
Returns: None
Exceptions:
ValueError Input HDUList is not for a waivered FITS file
def _verify(waiveredHdul):
"""
Verify that the input HDUList is for a waivered FITS file.
Parameters:
waiveredHdul HDUList object to be verified
Returns: None
Exceptions:
ValueError Input HDUList is not for a waivered FITS file
"""
if len(waiveredHdul) == 2:
#
# There must be exactly 2 HDU's
#
if waiveredHdul[0].header['NAXIS'] > 0:
#
# The Primary HDU must have some data
#
if isinstance(waiveredHdul[1], fits.TableHDU):
#
# The Alternate HDU must be a TableHDU
#
if waiveredHdul[0].data.shape[0] == \
waiveredHdul[1].data.shape[0] or \
waiveredHdul[1].data.shape[0] == 1:
#
# The number of arrays in the Primary HDU must match
# the number of rows in the TableHDU. This includes
# the case where there is only a single array and row.
#
return
#
# Not a valid waivered Fits file
#
raise ValueError("Input object does not represent a valid waivered" + \
" FITS file")
|
Convert the input waivered FITS object to a multi-extension FITS
HDUList object. Generate an output multi-extension FITS file if
requested.
Parameters:
waiveredObject input object representing a waivered FITS file;
either an astropy.io.fits.HDUList object, a file object, or a
file specification
multiExtensionFileName file specification for the output file
Default: None - do not generate an output file
forceFileOutput force the generation of an output file when the
multiExtensionFileName parameter is None; the output file
specification will be the same as the input file
specification with the last character of the base
name replaced with the character 'h'.
Default: False
verbose provide verbose output
Default: False
Returns:
mhdul an HDUList object in multi-extension FITS format.
Exceptions:
TypeError Input object is not a HDUList, a file object or a
file name
def toMultiExtensionFits(waiveredObject,
multiExtensionFileName=None,
forceFileOutput=False,
verbose=False):
"""
Convert the input waivered FITS object to a multi-extension FITS
HDUList object. Generate an output multi-extension FITS file if
requested.
Parameters:
waiveredObject input object representing a waivered FITS file;
either an astropy.io.fits.HDUList object, a file object, or a
file specification
multiExtensionFileName file specification for the output file
Default: None - do not generate an output file
forceFileOutput force the generation of an output file when the
multiExtensionFileName parameter is None; the output file
specification will be the same as the input file
specification with the last character of the base
name replaced with the character 'h'.
Default: False
verbose provide verbose output
Default: False
Returns:
mhdul an HDUList object in multi-extension FITS format.
Exceptions:
TypeError Input object is not a HDUList, a file object or a
file name
"""
if isinstance(waiveredObject, fits.HDUList):
whdul = waiveredObject
inputObjectDescription = "HDUList object"
else:
try:
whdul = fits.open(waiveredObject)
if isinstance(waiveredObject, string_types):
inputObjectDescription = "file " + waiveredObject
else:
inputObjectDescription = "file " + waiveredObject.name
except TypeError:
raise TypeError("Input object must be HDUList, file object, " + \
"or file name")
_verify(whdul)
undesiredPrimaryHeaderKeywords = ['ORIGIN','FITSDATE','FILENAME',
'ALLG-MAX','ALLG-MIN','ODATTYPE',
'SDASMGNU','OPSIZE','CTYPE2',
'CD2_2','CD2_1','CD1_2','CTYPE3',
'CD3_3','CD3_1','CD1_3','CD2_3',
'CD3_2']
#
# Create the multi-extension primary header as a copy of the
# waivered file primary header
#
mPHeader = whdul[0].header
originalDataType = whdul[0].header.get('ODATTYPE','')
#
# Remove primary header cards with keywords matching the
# list of undesired primary header keywords
#
for keyword in undesiredPrimaryHeaderKeywords:
#
# Be careful only to delete the first card that matches
# the keyword, not all of the cards
#
if keyword in mPHeader:
del mPHeader[mPHeader.index(keyword)]
#
# Get the columns from the secondary HDU table
#
wcols = whdul[1].columns
#
# Remove primary header cards with keywords matching the
# column names in the secondary HDU table
#
for keyword in wcols.names:
if keyword in mPHeader:
del mPHeader[keyword]
#
# Create the PrimaryHDU
#
mPHdu = fits.PrimaryHDU(header=mPHeader)
#
# Add the EXTEND card
#
mPHdu.header.set('EXTEND', value=True, after='NAXIS')
#
# Add the NEXTEND card. There will be one extension
# for each row in the waivered FITS file table HDU.
#
mPHdu.header['NEXTEND'] = (whdul[1].data.shape[0],
'Number of standard extensions')
#
# Create the multi-extension file HDUList from the primary header
#
mhdul = fits.HDUList([mPHdu])
#
# Create the extension HDUs for the multi-extension file. There
# will be one extension for each row in the waivered file's table.
#
instrument = mPHeader.get('INSTRUME', '')
nrows = whdul[1].data.shape[0]
for i in range(0,nrows):
#
# Create the basic HDU from the data
#
if nrows == 1:
#
# Handle case where there is only one row in the table
#
data = whdul[0].data
else:
data = whdul[0].data[i]
mhdul.append(fits.ImageHDU(data))
#
# Add cards to the header for each keyword in the column
# names of the secondary HDU table from the waivered file
#
for keyword,format,unit in zip(wcols.names,wcols.formats,wcols.units):
if unit == 'LOGICAL-':
#
# Handle logical values
#
if whdul[1].data.field(keyword)[i].strip() == 'T':
d = True
else:
d = False
elif format[0] == 'E':
#
# Handle floating point values
#
fmt = '%'+format[1:]+'G'
d = eval(fmt % float(whdul[1].data.field(keyword)[i]))
else:
d = whdul[1].data.field(keyword)[i]
kw_descr = ""
if keyword in whdul[1].header:
kw_descr = whdul[1].header[keyword]
mhdul[i+1].header[keyword] = (d, kw_descr)
#
# If original data is unsigned short then scale the data.
#
if originalDataType == 'USHORT':
mhdul[i+1].scale('int16','',bscale=1,bzero=32768)
mhdul[i+1].header.set('BSCALE', value=1, before='BZERO')
#
# For WFPC2 and FOS instruments require additional header cards
#
if instrument in ('WFPC2','FOC'):
#
# Add EXTNAME card to header
#
mhdul[i+1].header['EXTNAME'] = (mPHeader.get('FILETYPE',''),
'extension name')
#
# Add EXTVER card to the header
#
mhdul[i+1]._extver = i+1
mhdul[i+1].header.set('EXTVER', value=i+1,
comment='extension version number',
after='EXTNAME')
#
# Add the EXPNAME card to the header
#
mhdul[i+1].header.set('EXPNAME',
mPHeader.get('ROOTNAME', ''),
'9 character exposure identifier',
before='EXTVER')
#
# Add the INHERIT card to the header.
#
mhdul[i+1].header.set('INHERIT', True,
'inherit the primary header',
after='EXTVER')
#
# Add the ROOTNAME card to the header
#
mhdul[i+1].header.set('ROOTNAME',
mPHeader.get('ROOTNAME', ''),
'rootname of the observation set',
after='INHERIT')
if not multiExtensionFileName and forceFileOutput:
base,ext = os.path.splitext(whdul[0]._file.name)
multiExtensionFileName = base[:-1]+'h'+ext
verboseString = "Input " + inputObjectDescription + \
" converted to multi-extension FITS format."
if multiExtensionFileName:
if instrument in ('WFPC2','FOC'):
#
# write the FILENAME card to the header for the WFPC2 and FOC
# instruments
#
head,tail = os.path.split(multiExtensionFileName)
mhdul[0].header.set('FILENAME', value=tail, after='NEXTEND')
if ASTROPY_VER_GE13:
mhdul.writeto(multiExtensionFileName, overwrite=True)
else:
mhdul.writeto(multiExtensionFileName, clobber=True)
verboseString = verboseString[:-1] + " and written to " + \
multiExtensionFileName + "."
if verbose:
print(verboseString)
return mhdul
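A hedged usage sketch; the input and output names below are hypothetical HST-style file names used only to show the call shape.
mhdul = toMultiExtensionFits('u40x0102t_c0f.fits',
                             multiExtensionFileName='u40x0102t_c0h.fits',
                             verbose=True)
print(len(mhdul) - 1)   # one IMAGE extension per row of the waivered table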
|
Convert the input waivered FITS object to various formats. The
default conversion format is multi-extension FITS. Generate an output
file in the desired format if requested.
Parameters:
waiveredObject input object representing a waivered FITS file;
either an astropy.io.fits.HDUList object, a file object, or a
file specification
outputFileName file specification for the output file
Default: None - do not generate an output file
forceFileOutput force the generation of an output file when the
outputFileName parameter is None; the output file
specification will be the same as the input file
specification with the last character of the base
name replaced with the character `h` in
multi-extension FITS format.
Default: False
convertTo target conversion type
Default: 'multiExtension'
verbose provide verbose output
Default: False
Returns:
hdul an HDUList object in the requested format.
Exceptions:
ValueError Conversion type is unknown
def convertwaiveredfits(waiveredObject,
outputFileName=None,
forceFileOutput=False,
convertTo='multiExtension',
verbose=False):
"""
Convert the input waivered FITS object to various formats. The
default conversion format is multi-extension FITS. Generate an output
file in the desired format if requested.
Parameters:
waiveredObject input object representing a waivered FITS file;
either an astropy.io.fits.HDUList object, a file object, or a
file specification
outputFileName file specification for the output file
Default: None - do not generate an output file
forceFileOutput force the generation of an output file when the
outputFileName parameter is None; the output file
specification will be the same as the input file
specification with the last character of the base
name replaced with the character `h` in
multi-extension FITS format.
Default: False
convertTo target conversion type
Default: 'multiExtension'
verbose provide verbose output
Default: False
Returns:
hdul an HDUList object in the requested format.
Exceptions:
ValueError Conversion type is unknown
"""
if convertTo == 'multiExtension':
func = toMultiExtensionFits
else:
raise ValueError('Conversion type ' + convertTo + ' unknown')
return func(*(waiveredObject,outputFileName,forceFileOutput,verbose))
|
Determine Julian day from Persian date
def to_jd(year, month, day):
'''Determine Julian day from Persian date'''
if year >= 0:
y = 474
else:
y = 473
epbase = year - y
epyear = 474 + (epbase % 2820)
if month <= 7:
m = (month - 1) * 31
else:
m = (month - 1) * 30 + 6
return day + m + trunc(((epyear * 682) - 110) / 2816) + (epyear - 1) * 365 + trunc(epbase / 2820) * 1029983 + (EPOCH - 1)
|
Calculate Persian date from Julian day
def from_jd(jd):
'''Calculate Persian date from Julian day'''
jd = trunc(jd) + 0.5
depoch = jd - to_jd(475, 1, 1)
cycle = trunc(depoch / 1029983)
cyear = (depoch % 1029983)
if cyear == 1029982:
ycycle = 2820
else:
aux1 = trunc(cyear / 366)
aux2 = cyear % 366
ycycle = trunc(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1
year = ycycle + (2820 * cycle) + 474
if (year <= 0):
year -= 1
yday = (jd - to_jd(year, 1, 1)) + 1
if yday <= 186:
month = ceil(yday / 31)
else:
month = ceil((yday - 6) / 30)
day = int(jd - to_jd(year, month, 1)) + 1
return (year, month, day)
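A round-trip sketch of the two conversions above; it relies on the module-level EPOCH constant and math helpers the functions already use, and simply checks that from_jd recovers the date passed to to_jd.
jd = to_jd(1394, 1, 1)          # 1 Farvardin 1394
year, month, day = from_jd(jd)
print(jd, (year, month, day))   # from_jd(to_jd(...)) yields (1394, 1, 1)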
|
Initializes capture of stdout/stderr, Python warnings, and exceptions;
redirecting them to the loggers for the modules from which they originated.
def setup_global_logging():
"""
Initializes capture of stdout/stderr, Python warnings, and exceptions;
redirecting them to the loggers for the modules from which they originated.
"""
global global_logging_started
if not PY3K:
sys.exc_clear()
if global_logging_started:
return
orig_logger_class = logging.getLoggerClass()
logging.setLoggerClass(StreamTeeLogger)
try:
stdout_logger = logging.getLogger(__name__ + '.stdout')
stderr_logger = logging.getLogger(__name__ + '.stderr')
finally:
logging.setLoggerClass(orig_logger_class)
stdout_logger.setLevel(logging.INFO)
stderr_logger.setLevel(logging.ERROR)
stdout_logger.set_stream(sys.stdout)
stderr_logger.set_stream(sys.stderr)
sys.stdout = stdout_logger
sys.stderr = stderr_logger
exception_logger = logging.getLogger(__name__ + '.exc')
sys.excepthook = LoggingExceptionHook(exception_logger)
logging.captureWarnings(True)
rawinput = 'input' if PY3K else 'raw_input'
builtins._original_raw_input = getattr(builtins, rawinput)
setattr(builtins, rawinput, global_logging_raw_input)
global_logging_started = True
|
Disable global logging of stdio, warnings, and exceptions.
def teardown_global_logging():
"""Disable global logging of stdio, warnings, and exceptions."""
global global_logging_started
if not global_logging_started:
return
stdout_logger = logging.getLogger(__name__ + '.stdout')
stderr_logger = logging.getLogger(__name__ + '.stderr')
if sys.stdout is stdout_logger:
sys.stdout = sys.stdout.stream
if sys.stderr is stderr_logger:
sys.stderr = sys.stderr.stream
# If we still have an unhandled exception go ahead and handle it with the
# replacement excepthook before deleting it
exc_type, exc_value, exc_traceback = sys.exc_info()
if exc_type is not None:
sys.excepthook(exc_type, exc_value, exc_traceback)
del exc_type
del exc_value
del exc_traceback
if not PY3K:
sys.exc_clear()
del sys.excepthook
logging.captureWarnings(False)
rawinput = 'input' if PY3K else 'raw_input'
if hasattr(builtins, '_original_raw_input'):
setattr(builtins, rawinput, builtins._original_raw_input)
del builtins._original_raw_input
global_logging_started = False
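A small sketch of pairing the two calls, assuming both functions are in scope; anything printed between them is routed through the '.stdout' logger rather than going straight to the console.
setup_global_logging()
try:
    print('captured by the stdout logger')
finally:
    teardown_global_logging()   # restores sys.stdout/sys.stderr and the excepthook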
|
Do basic configuration for the logging system. Similar to
logging.basicConfig but the logger ``name`` is configurable and both a file
output and a stream output can be created. Returns a logger object.
The default behaviour is to create a logger called ``name`` with a null
handler, and to use the "%(levelname)s: %(message)s" format string, and add
the handler to the ``name`` logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
:param name: Logger name
:param format: handler format string
:param datefmt: handler date/time format specifier
:param stream: add a StreamHandler using ``stream``
(None disables the stream, default=None)
:param level: logger level (default=INFO).
:param filename: add a FileHandler using ``filename`` (default=None)
:param filemode: open ``filename`` with specified filemode ('w' or 'a')
:param filelevel: logger level for file logger (default=``level``)
:param propagate: propagate message to parent (default=True)
:returns: logging.Logger object
def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,
stream=None, level=logging.INFO, filename=None, filemode='w',
filelevel=None, propagate=True):
"""
Do basic configuration for the logging system. Similar to
logging.basicConfig but the logger ``name`` is configurable and both a file
output and a stream output can be created. Returns a logger object.
The default behaviour is to create a logger called ``name`` with a null
handler, and to use the "%(levelname)s: %(message)s" format string, and add
the handler to the ``name`` logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
:param name: Logger name
:param format: handler format string
:param datefmt: handler date/time format specifier
:param stream: add a StreamHandler using ``stream``
(None disables the stream, default=None)
:param level: logger level (default=INFO).
:param filename: add a FileHandler using ``filename`` (default=None)
:param filemode: open ``filename`` with specified filemode ('w' or 'a')
:param filelevel: logger level for file logger (default=``level``)
:param propagate: propagate message to parent (default=True)
:returns: logging.Logger object
"""
# Get a logger for the specified name
logger = logging.getLogger(name)
logger.setLevel(level)
fmt = logging.Formatter(format, datefmt)
logger.propagate = propagate
# Remove existing handlers, otherwise multiple handlers can accrue
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
# Add handlers. Add NullHandler if no file or stream output so that
# modules don't emit a warning about no handler.
if not (filename or stream):
logger.addHandler(logging.NullHandler())
if filename:
hdlr = logging.FileHandler(filename, filemode)
if filelevel is None:
filelevel = level
hdlr.setLevel(filelevel)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
if stream:
hdlr = logging.StreamHandler(stream)
hdlr.setLevel(level)
hdlr.setFormatter(fmt)
logger.addHandler(hdlr)
return logger
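A usage sketch with both a stream and a file handler; 'myapp' and 'myapp.log' are hypothetical names chosen only for illustration.
import logging
import sys

log = create_logger('myapp', stream=sys.stdout, level=logging.DEBUG,
                    filename='myapp.log', filelevel=logging.WARNING)
log.debug('appears on stdout only')
log.warning('appears on stdout and is also written to myapp.log')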
|
Set the stream that this logger is meant to replace. Usually this will
be either `sys.stdout` or `sys.stderr`, but can be any object with
`write()` and `flush()` methods, as supported by
`logging.StreamHandler`.
def set_stream(self, stream):
"""
Set the stream that this logger is meant to replace. Usually this will
be either `sys.stdout` or `sys.stderr`, but can be any object with
`write()` and `flush()` methods, as supported by
`logging.StreamHandler`.
"""
for handler in self.handlers[:]:
if isinstance(handler, logging.StreamHandler):
self.handlers.remove(handler)
if stream is not None:
stream_handler = logging.StreamHandler(stream)
stream_handler.addFilter(_StreamHandlerEchoFilter())
stream_handler.setFormatter(logging.Formatter('%(message)s'))
self.addHandler(stream_handler)
self.stream = stream
|
Buffers each message until a newline is reached. Each complete line is
then published to the logging system through ``self.log()``.
def write(self, message):
"""
Buffers each message until a newline is reached. Each complete line is
then published to the logging system through ``self.log()``.
"""
self.__thread_local_ctx.write_count += 1
try:
if self.__thread_local_ctx.write_count > 1:
return
# For each line in the buffer ending with \n, output that line to
# the logger
msgs = (self.buffer + message).split('\n')
self.buffer = msgs.pop(-1)
for m in msgs:
self.log_orig(m, echo=True)
finally:
self.__thread_local_ctx.write_count -= 1
|
Returns the fully-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement.
def find_actual_caller(self):
"""
Returns the fully-qualified module name, full pathname, line number, and
function in which `StreamTeeLogger.write()` was called. For example,
if this instance is used to replace `sys.stdout`, this will return the
location of any print statement.
"""
# Gleaned from code in the logging module itself...
try:
f = sys._getframe(1)
##f = inspect.currentframe(1)
except Exception:
f = None
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown module)", "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
mod = inspect.getmodule(f)
if mod is None:
modname = '__main__'
else:
modname = mod.__name__
if modname == __name__:
# Crawl back until the first frame outside of this module
f = f.f_back
continue
rv = (modname, filename, f.f_lineno, co.co_name)
break
return rv
|
Deserialize ``fp`` (a ``.read()``-supporting file-like object
containing an XPORT document) to a Python object.
def load(fp):
'''
Deserialize ``fp`` (a ``.read()``-supporting file-like object
containing an XPORT document) to a Python object.
'''
reader = reading.Reader(fp)
keys = reader.fields
columns = {k: [] for k in keys}
for row in reader:
for key, value in zip(keys, row):
columns[key].append(value)
return columns
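A short usage sketch; 'example.xpt' is a hypothetical SAS transport (XPORT) file opened in binary mode and decoded into a column-oriented dict.
with open('example.xpt', 'rb') as fp:
    columns = load(fp)
for name, values in columns.items():
    print(name, values[:3])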
|
Go to the login page.
def _get_login_page(self):
"""Go to the login page."""
try:
raw_res = yield from self._session.get(HOME_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not connect to login page")
# Get login url
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
form_node = soup.find('form', {'name': 'fm'})
if form_node is None:
raise PyHydroQuebecError("No login form found")
login_url = form_node.attrs.get('action')
if login_url is None:
raise PyHydroQuebecError("Can not find login url")
return login_url
|
Login to HydroQuebec website.
def _post_login_page(self, login_url):
"""Login to HydroQuebec website."""
data = {"login": self.username,
"_58_password": self.password}
try:
raw_res = yield from self._session.post(login_url,
data=data,
timeout=self._timeout,
allow_redirects=False)
except OSError:
raise PyHydroQuebecError("Can not submit login form")
if raw_res.status != 302:
raise PyHydroQuebecError("Login error: Bad HTTP status code. "
"Please check your username/password.")
return True
|
Get id of consumption profile.
def _get_p_p_id_and_contract(self):
"""Get id of consumption profile."""
contracts = {}
try:
raw_res = yield from self._session.get(PROFILE_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get profile page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
# Search contracts
for node in soup.find_all('span', {"class": "contrat"}):
rematch = re.match("C[a-z]* ([0-9]{4} [0-9]{5})", node.text)
if rematch is not None:
contracts[rematch.group(1).replace(" ", "")] = None
# search for links
for node in soup.find_all('a', {"class": "big iconLink"}):
for contract in contracts:
if contract in node.attrs.get('href'):
contracts[contract] = node.attrs.get('href')
# Looking for p_p_id
p_p_id = None
for node in soup.find_all('span'):
node_id = node.attrs.get('id', "")
if node_id.startswith("p_portraitConsommation_WAR"):
p_p_id = node_id[2:]
break
if p_p_id is None:
raise PyHydroQuebecError("Could not get p_p_id")
return p_p_id, contracts
|
Get contract number when we have only one contract.
def _get_lonely_contract(self):
"""Get contract number when we have only one contract."""
contracts = {}
try:
raw_res = yield from self._session.get(MAIN_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get main page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
info_node = soup.find("div", {"class": "span3 contrat"})
if info_node is None:
raise PyHydroQuebecError("Can not find contract")
research = re.search("Contrat ([0-9]{4} [0-9]{5})", info_node.text)
if research is not None:
contracts[research.group(1).replace(" ", "")] = None
if contracts == {}:
raise PyHydroQuebecError("Can not find contract")
return contracts
|
Get all balances.
.. todo::
It seems balances are shown (MAIN_URL) in the same order
as the contracts in the profile page (PROFILE_URL).
Maybe we should ensure that.
def _get_balances(self):
"""Get all balances.
.. todo::
It seems balances are shown (MAIN_URL) in the same order
as the contracts in the profile page (PROFILE_URL).
Maybe we should ensure that.
"""
balances = []
try:
raw_res = yield from self._session.get(MAIN_URL,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get main page")
# Parse html
content = yield from raw_res.text()
soup = BeautifulSoup(content, 'html.parser')
solde_nodes = soup.find_all("div", {"class": "solde-compte"})
if solde_nodes == []:
raise PyHydroQuebecError("Can not find balance")
for solde_node in solde_nodes:
try:
balance = solde_node.find("p").text
except AttributeError:
raise PyHydroQuebecError("Can not find balance")
balances.append(float(balance[:-2]
.replace(",", ".")
.replace("\xa0", "")))
return balances
|
Load the profile page of a specific contract when we have multiple contracts.
def _load_contract_page(self, contract_url):
"""Load the profile page of a specific contract when we have multiple contracts."""
try:
yield from self._session.get(contract_url,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get profile page for a "
"specific contract")
|
Get annual data.
def _get_annual_data(self, p_p_id):
"""Get annual data."""
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesConsommationAnnuelles"}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecAnnualError("Can not get annual data")
try:
json_output = yield from raw_res.json(content_type='text/json')
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get annual data")
if not json_output.get('success'):
raise PyHydroQuebecAnnualError("Could not get annual data")
if not json_output.get('results'):
raise PyHydroQuebecAnnualError("Could not get annual data")
if 'courant' not in json_output.get('results')[0]:
raise PyHydroQuebecAnnualError("Could not get annual data")
return json_output.get('results')[0]['courant']
|
Get monthly data.
def _get_monthly_data(self, p_p_id):
"""Get monthly data."""
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_resource_id": ("resourceObtenirDonnees"
"PeriodesConsommation")}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get monthly data")
try:
json_output = yield from raw_res.json(content_type='text/json')
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecError("Could not get monthly data")
if not json_output.get('success'):
raise PyHydroQuebecError("Could not get monthly data")
return json_output.get('results')
|
Get Hourly Data.
def _get_hourly_data(self, day_date, p_p_id):
"""Get Hourly Data."""
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesConsommationHoraires",
"p_p_cacheability": "cacheLevelPage",
"p_p_col_id": "column-2",
"p_p_col_count": 1,
"date": day_date,
}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get hourly data")
try:
json_output = yield from raw_res.json(content_type='text/json')
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get hourly data")
hourly_consumption_data = json_output['results']['listeDonneesConsoEnergieHoraire']
hourly_power_data = json_output['results']['listeDonneesConsoPuissanceHoraire']
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesMeteoHoraires",
"p_p_cacheability": "cacheLevelPage",
"p_p_col_id": "column-2",
"p_p_col_count": 1,
"dateDebut": day_date,
"dateFin": day_date,
}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get hourly data")
try:
json_output = yield from raw_res.json(content_type='text/json')
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get hourly data")
hourly_weather_data = []
if not json_output.get('results'):
# Missing Temperature data from Hydro-Quebec (but don't crash the app for that)
hourly_weather_data = [None]*24
else:
hourly_weather_data = json_output['results'][0]['listeTemperaturesHeure']
# Add temp in data
processed_hourly_data = [{'hour': data['heure'],
'lower': data['consoReg'],
'high': data['consoHaut'],
'total': data['consoTotal'],
'temp': hourly_weather_data[i]}
for i, data in enumerate(hourly_consumption_data)]
raw_hourly_data = {'Energy': hourly_consumption_data,
'Power': hourly_power_data,
'Weather': hourly_weather_data}
hourly_data = {'processed_hourly_data': processed_hourly_data,
'raw_hourly_data': raw_hourly_data}
return hourly_data
|
Get detailed energy use from a specific contract.
def fetch_data_detailled_energy_use(self, start_date=None, end_date=None):
"""Get detailled energy use from a specific contract."""
if start_date is None:
start_date = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
if end_date is None:
end_date = datetime.datetime.now(HQ_TIMEZONE)
# Get http session
yield from self._get_httpsession()
# Get login page
login_url = yield from self._get_login_page()
# Post login page
yield from self._post_login_page(login_url)
# Get p_p_id and contracts
p_p_id, contracts = yield from self._get_p_p_id_and_contract()
# If we don't have any contracts that means we have only
# one contract. Let's get it
if contracts == {}:
contracts = yield from self._get_lonely_contract()
# For all contracts
for contract, contract_url in contracts.items():
if contract_url:
yield from self._load_contract_page(contract_url)
data = {}
dates = [(start_date + datetime.timedelta(n))
for n in range(int((end_date - start_date).days))]
for date in dates:
# Get Hourly data
day_date = date.strftime("%Y-%m-%d")
hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
data[day_date] = hourly_data['raw_hourly_data']
# Add contract
self._data[contract] = data
|
Get the latest data from HydroQuebec.
def fetch_data(self):
"""Get the latest data from HydroQuebec."""
# Get http session
yield from self._get_httpsession()
# Get login page
login_url = yield from self._get_login_page()
# Post login page
yield from self._post_login_page(login_url)
# Get p_p_id and contracts
p_p_id, contracts = yield from self._get_p_p_id_and_contract()
# If we don't have any contracts that means we have only
# one contract. Let's get it
if contracts == {}:
contracts = yield from self._get_lonely_contract()
# Get balance
balances = yield from self._get_balances()
balances_len = len(balances)
balance_id = 0
# For all contracts
for contract, contract_url in contracts.items():
if contract_url:
yield from self._load_contract_page(contract_url)
# Get Hourly data
try:
yesterday = datetime.datetime.now(HQ_TIMEZONE) - datetime.timedelta(days=1)
day_date = yesterday.strftime("%Y-%m-%d")
hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
hourly_data = hourly_data['processed_hourly_data']
except Exception: # pylint: disable=W0703
# We don't have hourly data for some reason
hourly_data = {}
# Get Annual data
try:
annual_data = yield from self._get_annual_data(p_p_id)
except PyHydroQuebecAnnualError:
# We don't have annual data, which is possible if your
# contract is younger than 1 year
annual_data = {}
# Get Monthly data
monthly_data = yield from self._get_monthly_data(p_p_id)
monthly_data = monthly_data[0]
# Get daily data
start_date = monthly_data.get('dateDebutPeriode')
end_date = monthly_data.get('dateFinPeriode')
try:
daily_data = yield from self._get_daily_data(p_p_id, start_date, end_date)
except Exception: # pylint: disable=W0703
daily_data = []
# We have to test daily_data because it's empty
# at the end/start of a period
if daily_data:
daily_data = daily_data[0]['courant']
# format data
contract_data = {"balance": balances[balance_id]}
for key1, key2 in MONTHLY_MAP:
contract_data[key1] = monthly_data[key2]
for key1, key2 in ANNUAL_MAP:
contract_data[key1] = annual_data.get(key2, "")
# We have to test daily_data because it's empty
# at the end/start of a period
if daily_data:
for key1, key2 in DAILY_MAP:
contract_data[key1] = daily_data[key2]
# Hourly
if hourly_data:
contract_data['yesterday_hourly_consumption'] = hourly_data
# Add contract
self._data[contract] = contract_data
balance_count = balance_id + 1
if balance_count < balances_len:
balance_id += 1
|
Return collected data.
def get_data(self, contract=None):
"""Return collected data."""
if contract is None:
return self._data
if contract in self._data.keys():
return {contract: self._data[contract]}
raise PyHydroQuebecError("Contract {} not found".format(contract))
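A hedged driver sketch for these coroutines; the client class name and constructor signature below are assumptions about how the wrapper is exposed, so adjust them to the actual class before use.
import asyncio

client = HydroQuebecClient('user@example.com', 'password', timeout=30)  # assumed class/signature
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.ensure_future(client.fetch_data()))
print(client.get_data())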
|
Posts the current state of each device to the server and schedules the next call in n seconds.
:param serverURL:
:return:
def ping(self):
"""
Posts the current state of each device to the server and schedules the next call in n seconds.
:param serverURL:
:return:
"""
from datetime import datetime
nextRun = datetime.utcnow().timestamp() + self.cfg.getPingInterval()
self.sendHeartbeat()
self.scheduleNextHeartbeat(nextRun)
|
Posts the current state to the server.
:param serverURL: the URL to ping.
:return:
def sendHeartbeat(self):
"""
Posts the current state to the server.
:param serverURL: the URL to ping.
:return:
"""
for name, md in self.cfg.recordingDevices.items():
try:
data = marshal(md, recordingDeviceFields)
data['serviceURL'] = self.cfg.getServiceURL() + API_PREFIX + '/devices/' + name
targetURL = self.serverURL + API_PREFIX + '/devices/' + name
logger.info("Pinging " + targetURL)
resp = self.httpclient.put(targetURL, json=data)
if resp.status_code != 200:
logger.warning("Unable to ping server at " + targetURL + " with " + str(data.keys()) +
", response is " + str(resp.status_code))
else:
logger.info("Pinged server at " + targetURL + " with " + str(data.items()))
except:
logger.exception("Unable to ping server")
|
Schedules the next ping.
:param nextRun: when we should run next.
:param serverURL: the URL to ping.
:return:
def scheduleNextHeartbeat(self, nextRun):
"""
Schedules the next ping.
:param nextRun: when we should run next.
:param serverURL: the URL to ping.
:return:
"""
import threading
from datetime import datetime
tilNextTime = max(nextRun - datetime.utcnow().timestamp(), 0)
logging.getLogger('recorder').info("Scheduling next ping in " + str(round(tilNextTime, 3)) + " seconds")
threading.Timer(tilNextTime, self.ping).start()
|
Validate a type or matcher argument to the constructor.
def _validate_argument(self, arg):
"""Validate a type or matcher argument to the constructor."""
if arg is None:
return arg
if isinstance(arg, type):
return InstanceOf(arg)
if not isinstance(arg, BaseMatcher):
raise TypeError(
"argument of %s can be a type or a matcher (got %r)" % (
self.__class__.__name__, type(arg)))
return arg
|
Initialize the mapping matcher with constructor arguments.
def _initialize(self, *args, **kwargs):
"""Initiaize the mapping matcher with constructor arguments."""
self.items = None
self.keys = None
self.values = None
if args:
if len(args) != 2:
raise TypeError("expected exactly two positional arguments, "
"got %s" % len(args))
if kwargs:
raise TypeError(
"expected positional or keyword arguments, not both")
# got positional arguments only
self.keys, self.values = map(self._validate_argument, args)
elif kwargs:
has_kv = 'keys' in kwargs and 'values' in kwargs
has_of = 'of' in kwargs
if not (has_kv or has_of):
raise TypeError("expected keys/values or items matchers, "
"but got: %s" % list(kwargs.keys()))
if has_kv and has_of:
raise TypeError(
"expected keys & values, or items matchers, not both")
if has_kv:
# got keys= and values= matchers
self.keys = self._validate_argument(kwargs['keys'])
self.values = self._validate_argument(kwargs['values'])
else:
# got of= matcher, which can be a tuple of matchers,
# or a single matcher for dictionary items
of = kwargs['of']
if isinstance(of, tuple):
try:
# got of= as tuple of matchers
self.keys, self.values = \
map(self._validate_argument, of)
except ValueError:
raise TypeError(
"of= tuple for %s has to be a pair of matchers/types" % (
self.__class__.__name__,))
else:
# got of= as a single matcher
self.items = self._validate_argument(of)
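A hedged usage sketch of the three calling conventions this initializer accepts, assuming it backs the callee library's Dict matcher and that callee exposes Dict at the package top level; the mock call is only there to give the matcher something to compare against.

from unittest import mock

from callee import Dict  # assumption: Dict is exported at the top level

func = mock.Mock()
func({'alpha': 1, 'beta': 2})

# The three equivalent spellings of "a dict with str keys and int values":
func.assert_called_with(Dict(str, int))              # two positional arguments
func.assert_called_with(Dict(keys=str, values=int))  # keys=/values= keywords
func.assert_called_with(Dict(of=(str, int)))         # of= as a (keys, values) pair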
|
Build the docs and show them in the default web browser.
def docs(ctx, output='html', rebuild=False, show=True, verbose=True):
"""Build the docs and show them in default web browser."""
sphinx_build = ctx.run(
'sphinx-build -b {output} {all} {verbose} docs docs/_build'.format(
output=output,
all='-a -E' if rebuild else '',
verbose='-v' if verbose else ''))
if not sphinx_build.ok:
fatal("Failed to build the docs", cause=sphinx_build)
if show:
path = os.path.join(DOCS_OUTPUT_DIR, 'index.html')
if sys.platform == 'darwin':
path = 'file://%s' % os.path.abspath(path)
webbrowser.open_new_tab(path)
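A hedged sketch of driving the task above programmatically from within the project's tasks module; on the command line the equivalent is roughly `invoke docs --rebuild` (task and flag names assumed from the signature).

from invoke import Context

ctx = Context()
# rebuild from scratch but skip opening a browser tab, e.g. on a CI box
docs(ctx, output='html', rebuild=True, show=False)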
|
Upload the package to PyPI.
def upload(ctx, yes=False):
"""Upload the package to PyPI."""
import callee
version = callee.__version__
# check the package's version
# TODO: add a 'release' to automatically bless a version as release one
if version.endswith('-dev'):
fatal("Can't upload a development version (%s) to PyPI!", version)
# run the upload if it has been confirmed by the user
if not yes:
answer = input("Do you really want to upload to PyPI [y/N]? ")
yes = answer.strip().lower() == 'y'
if not yes:
logging.warning("Aborted -- not uploading to PyPI.")
return -2
logging.debug("Uploading version %s to PyPI...", version)
setup_py_upload = ctx.run('python setup.py sdist upload')
if not setup_py_upload.ok:
fatal("Failed to upload version %s to PyPI!", version,
cause=setup_py_upload)
logging.info("PyPI upload completed successfully.")
# add a Git tag and push
git_tag = ctx.run('git tag %s' % version)
if not git_tag.ok:
fatal("Failed to add a Git tag for uploaded version %s", version,
cause=git_tag)
git_push = ctx.run('git push && git push --tags')
if not git_push.ok:
fatal("Failed to push the release upstream.", cause=git_push)
|
Log an error message and exit.
The following arguments are keyword-only.
:param exitcode: Optional exit code to use
:param cause: Optional Invoke Result object, i.e. the
result of a subprocess invocation
def fatal(*args, **kwargs):
"""Log an error message and exit.
The following arguments are keyword-only.
:param exitcode: Optional exit code to use
:param cause: Optional Invoke Result object, i.e. the
result of a subprocess invocation
"""
# determine the exitcode to return to the operating system
exitcode = None
if 'exitcode' in kwargs:
exitcode = kwargs.pop('exitcode')
if 'cause' in kwargs:
cause = kwargs.pop('cause')
if not isinstance(cause, Result):
raise TypeError(
"invalid cause of fatal error: expected %r, got %r" % (
Result, type(cause)))
exitcode = exitcode or cause.return_code
logging.error(*args, **kwargs)
raise Exit(exitcode or -1)
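A minimal usage sketch, in the spirit of the tasks above: run a subprocess with warn=True so Invoke does not raise on failure, then let fatal() turn the failure into a log message and an exit code taken from the cause.

result = ctx.run('python setup.py sdist upload', warn=True)
if not result.ok:
    fatal("Failed to upload version %s to PyPI!", version, cause=result)

# or, with no subprocess involved, an explicit exit code:
# fatal("Can't upload a development version (%s) to PyPI!", version, exitcode=2)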
|
This is the "atomic" function looped by other functions
def rungtd1d(time: Union[datetime, str, np.ndarray],
altkm: np.ndarray,
glat: float, glon: float) -> xarray.Dataset:
"""
This is the "atomic" function looped by other functions
"""
time = todt64(time)
# %% get solar parameters for date
f107Ap = gi.getApF107(time, smoothdays=81)
f107a = f107Ap['f107s'].item()
f107 = f107Ap['f107'].item()
Ap = f107Ap['Ap'].item()
# %% dimensions
altkm = np.atleast_1d(altkm)
assert altkm.ndim == 1
assert isinstance(glon, (int, float))
assert isinstance(glat, (int, float))
assert isinstance(time, np.datetime64) or (time.size == 1 and isinstance(
time[0], np.datetime64)), 'if you have multiple times, use a for loop over them'
# don't check ap, too complicated
assert isinstance(MASS, (float, int))
assert len(TSELECOPS) == 25
# %%
gtd7.tselec(TSELECOPS) # like the msis_driver example
iyd, utsec, stl = datetime2gtd(time, glon)
altkm = np.atleast_1d(altkm)
gtd7.meters(1) # makes output in m^-3 and kg/m^-3
# %%
if isinstance(Ap, (float, int)):
Ap = [Ap]*7 # even if SW(9) == 1 due to f2py needs for array
dens = np.empty((altkm.size, len(species)))
temp = np.empty((altkm.size, len(ttypes)))
for i, a in enumerate(altkm):
dens[i, :], temp[i, :] = gtd7.gtd7(iyd, utsec, a, glat, glon, stl, f107a, f107, Ap, MASS)
dsf = {k: (('time', 'alt_km', 'lat', 'lon'), v[None, :, None, None]) for (k, v) in zip(species, dens.T)}
dsf.update({'Tn': (('time', 'alt_km', 'lat', 'lon'), temp[:, 1][None, :, None, None]),
'Texo': (('time', 'alt_km', 'lat', 'lon'), temp[:, 0][None, :, None, None])})
atmos = xarray.Dataset(dsf,
coords={'time': time.astype(datetime), 'alt_km': altkm, 'lat': [glat], 'lon': [glon], },
attrs={'Ap': Ap, 'f107': f107, 'f107a': f107a,
'species': species})
return atmos
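A hedged usage sketch for the function above: one time, one location, a vector of altitudes. It assumes the compiled NRLMSISE-00 bindings (gtd7) are importable and that the space-weather indices the function fetches are available.

from datetime import datetime

import numpy as np

atmos = rungtd1d(time=datetime(2017, 3, 1, 12),
                 altkm=np.arange(100., 500., 20.),
                 glat=65.1, glon=-147.5)
print(atmos['Tn'].squeeze().values)      # neutral temperature profile vs. altitude
print(float(atmos.attrs['f107']))        # F10.7 index used for this run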
|
Save the form, then delegate to the parent class's form_valid().
def form_valid(self, form):
""" Call `form.save()` and super itself. """
form.save()
return super(SubscriptionView, self).form_valid(form)
|