def objects_to_datetime64ns(data, dayfirst, yearfirst,
                            utc=False, errors="raise",
                            require_iso8601=False, allow_object=False):
    """
    Convert data to array of timestamps.

    Parameters
    ----------
    data : np.ndarray[object]
    dayfirst : bool
    yearfirst : bool
    utc : bool, default False
        Whether to convert timezone-aware timestamps to UTC
    errors : {'raise', 'ignore', 'coerce'}
    require_iso8601 : bool, default False
        Whether to require strings to be in ISO 8601 format.
    allow_object : bool
        Whether to return an object-dtype ndarray instead of raising
        if the data contains more than one timezone.

    Returns
    -------
    result : ndarray
        np.int64 dtype if returned values represent UTC timestamps
        np.datetime64[ns] if returned values represent wall times
        object if mixed timezones
    inferred_tz : tzinfo or None

    Raises
    ------
    ValueError : if data cannot be converted to datetimes
    """
    assert errors in ["raise", "ignore", "coerce"]

    # if str-dtype, convert
    data = np.array(data, copy=False, dtype=np.object_)

    try:
        result, tz_parsed = tslib.array_to_datetime(
            data,
            errors=errors,
            utc=utc,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            require_iso8601=require_iso8601
        )
    except ValueError as e:
        try:
            values, tz_parsed = conversion.datetime_to_datetime64(data)
            # If tzaware, these values represent unix timestamps, so we
            # return them as i8 to distinguish from wall times
            return values.view('i8'), tz_parsed
        except (ValueError, TypeError):
            raise e

    if tz_parsed is not None:
        # We can take a shortcut since the datetime64 numpy array is in UTC
        # Return i8 values to denote unix timestamps
        return result.view('i8'), tz_parsed
    elif is_datetime64_dtype(result):
        # returning M8[ns] denotes wall-times; since tz is None
        # the distinction is a thin one
        return result, tz_parsed
    elif is_object_dtype(result):
        # GH#23675 when called via `pd.to_datetime`, returning an
        # object-dtype array is allowed. When called via `pd.DatetimeIndex`,
        # we can only accept datetime64 dtype, so raise TypeError if
        # object-dtype is returned, as that indicates the values can be
        # recognized as datetimes but they have conflicting
        # timezones/awareness
        if allow_object:
            return result, tz_parsed
        raise TypeError(result)
    else:  # pragma: no cover
        # GH#23675 this TypeError should never be hit, whereas the
        # TypeError in the object-dtype branch above is reachable.
        raise TypeError(result)

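A minimal illustration of the mixed-timezone fallback described above, via the public pd.to_datetime entry point rather than this internal helper; the exact dtype and any warnings emitted vary across pandas versions, so treat this as a sketch:

import pandas as pd

# Mixed UTC offsets cannot be represented by a single
# datetime64[ns, tz] dtype, so the result falls back to object dtype
# holding individual tz-aware Timestamps (pandas-version dependent).
mixed = ['2019-01-01 00:00:00-05:00', '2019-01-01 00:00:00+01:00']
result = pd.to_datetime(mixed)
print(result.dtype)  # object
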
def maybe_convert_dtype(data, copy):
    """
    Convert data based on dtype conventions, issuing deprecation warnings
    or errors where appropriate.

    Parameters
    ----------
    data : np.ndarray or pd.Index
    copy : bool

    Returns
    -------
    data : np.ndarray or pd.Index
    copy : bool

    Raises
    ------
    TypeError : PeriodDtype data is passed
    """
    if is_float_dtype(data):
        # Note: we must cast to datetime64[ns] here in order to treat these
        # as wall-times instead of UTC timestamps.
        data = data.astype(_NS_DTYPE)
        copy = False
        # TODO: deprecate this behavior to instead treat symmetrically
        # with integer dtypes. See discussion in GH#23675

    elif is_timedelta64_dtype(data):
        warnings.warn("Passing timedelta64-dtype data is deprecated, will "
                      "raise a TypeError in a future version",
                      FutureWarning, stacklevel=5)
        data = data.view(_NS_DTYPE)

    elif is_period_dtype(data):
        # Note: without explicitly raising here, PeriodIndex
        # test_setops.test_join_does_not_recur fails
        raise TypeError("Passing PeriodDtype data is invalid. "
                        "Use `data.to_timestamp()` instead")

    elif is_categorical_dtype(data):
        # GH#18664 preserve tz in going DTI->Categorical->DTI
        # TODO: cases where we need to do another pass through this func,
        # e.g. the categories are timedelta64s
        data = data.categories.take(data.codes, fill_value=NaT)._values
        copy = False

    elif is_extension_type(data) and not is_datetime64tz_dtype(data):
        # Includes categorical
        # TODO: We have no tests for these
        data = np.array(data, dtype=np.object_)
        copy = False

    return data, copy

def maybe_infer_tz(tz, inferred_tz):
    """
    If a timezone is inferred from data, check that it is compatible with
    the user-provided timezone, if any.

    Parameters
    ----------
    tz : tzinfo or None
    inferred_tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if both timezones are present but do not match
    """
    if tz is None:
        tz = inferred_tz
    elif inferred_tz is None:
        pass
    elif not timezones.tz_compare(tz, inferred_tz):
        raise TypeError('data is already tz-aware {inferred_tz}, unable to '
                        'set specified tz: {tz}'
                        .format(inferred_tz=inferred_tz, tz=tz))
    return tz

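A minimal usage sketch of the contract, assuming pytz timezones and the maybe_infer_tz helper defined above:

import pytz

eastern = pytz.timezone('US/Eastern')

print(maybe_infer_tz(None, eastern))  # US/Eastern (inferred wins)
print(maybe_infer_tz(eastern, None))  # US/Eastern (explicit wins)
# maybe_infer_tz(pytz.UTC, eastern) would raise TypeError, since the
# two non-None timezones disagree.
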
def _validate_dt64_dtype(dtype):
    """
    Check that a dtype, if passed, represents either a numpy datetime64[ns]
    dtype or a pandas DatetimeTZDtype.

    Parameters
    ----------
    dtype : object

    Returns
    -------
    dtype : None, numpy.dtype, or DatetimeTZDtype

    Raises
    ------
    ValueError : invalid dtype

    Notes
    -----
    Unlike validate_tz_from_dtype, this does _not_ allow non-existent
    tz errors to go through
    """
    if dtype is not None:
        dtype = pandas_dtype(dtype)
        if is_dtype_equal(dtype, np.dtype("M8")):
            # no precision, warn
            dtype = _NS_DTYPE
            msg = textwrap.dedent("""\
                Passing in 'datetime64' dtype with no precision is deprecated
                and will raise in a future version. Please pass in
                'datetime64[ns]' instead.""")
            warnings.warn(msg, FutureWarning, stacklevel=5)

        if ((isinstance(dtype, np.dtype) and dtype != _NS_DTYPE)
                or not isinstance(dtype, (np.dtype, DatetimeTZDtype))):
            raise ValueError("Unexpected value for 'dtype': '{dtype}'. "
                             "Must be 'datetime64[ns]' or DatetimeTZDtype."
                             .format(dtype=dtype))
    return dtype

def validate_tz_from_dtype(dtype, tz):
    """
    If the given dtype is a DatetimeTZDtype, extract the implied tzinfo
    object from it and check that it does not conflict with the given tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        dtz = getattr(dtype, 'tz', None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype"
                                 " with a tz")
            tz = dtz

        if tz is not None and is_datetime64_dtype(dtype):
            # We also need to check for the case where the user passed a
            # tz-naive dtype (i.e. datetime64[ns]); in that branch dtz is
            # None, so raise unconditionally rather than re-comparing tz
            # against dtz.
            raise ValueError("cannot supply both a tz and a "
                             "timezone-naive dtype (i.e. datetime64[ns])")

    return tz

def _infer_tz_from_endpoints(start, end, tz):
    """
    If a timezone is not explicitly given via `tz`, see if one can be
    inferred from the `start` and `end` endpoints. If more than one of
    these inputs provides a timezone, require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except Exception:
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')

    inferred_tz = timezones.maybe_get_tz(inferred_tz)
    tz = timezones.maybe_get_tz(tz)

    if tz is not None and inferred_tz is not None:
        if not timezones.tz_compare(inferred_tz, tz):
            raise AssertionError("Inferred time zone not equal to passed "
                                 "time zone")
    elif inferred_tz is not None:
        tz = inferred_tz

    return tz

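A short usage sketch, assuming tz-aware endpoint Timestamps and access to the _infer_tz_from_endpoints helper defined above:

import pandas as pd

start = pd.Timestamp('2019-01-01', tz='US/Eastern')
end = pd.Timestamp('2019-01-02', tz='US/Eastern')

# tz=None, so the timezone is inferred from the matching endpoints.
print(_infer_tz_from_endpoints(start, end, None))  # US/Eastern
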
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    is_none : argument that should be None
    is_not_none : argument that should not be None
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None

    Returns
    -------
    ts : Timestamp
    """
    # Make sure start and end are timezone localized if:
    # 1) freq = a Timedelta-like frequency (Tick)
    # 2) freq = None i.e. generating a linspaced range
    if isinstance(freq, Tick) or freq is None:
        localize_args = {'tz': tz, 'ambiguous': False}
    else:
        localize_args = {'tz': None}
    if is_none is None and is_not_none is not None:
        ts = ts.tz_localize(**localize_args)
    return ts

def _sub_datetime_arraylike(self, other):
    """subtract DatetimeArray/Index or ndarray[datetime64]"""
    if len(self) != len(other):
        raise ValueError("cannot subtract indices of unequal length")

    if isinstance(other, np.ndarray):
        assert is_datetime64_dtype(other)
        other = type(self)(other)

    if not self._has_same_tz(other):
        # require tz compat
        raise TypeError("{cls} subtraction must have the same "
                        "timezones or no timezones"
                        .format(cls=type(self).__name__))

    self_i8 = self.asi8
    other_i8 = other.asi8
    arr_mask = self._isnan | other._isnan
    new_values = checked_add_with_arr(self_i8, -other_i8,
                                      arr_mask=arr_mask)
    if self._hasnans or other._hasnans:
        new_values[arr_mask] = iNaT
    return new_values.view('timedelta64[ns]')

def _add_delta(self, delta):
    """
    Add a timedelta-like, Tick, or TimedeltaIndex-like object to self,
    yielding a new DatetimeArray

    Parameters
    ----------
    delta : {timedelta, np.timedelta64, Tick,
             TimedeltaIndex, ndarray[timedelta64]}

    Returns
    -------
    result : DatetimeArray
    """
    new_values = super()._add_delta(delta)
    return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')

def tz_convert(self, tz):
    """
    Convert tz-aware Datetime Array/Index from one time zone to another.

    Parameters
    ----------
    tz : str, pytz.timezone, dateutil.tz.tzfile or None
        Time zone for time. Corresponding timestamps would be converted
        to this time zone of the Datetime Array/Index. A `tz` of None
        will convert to UTC and remove the timezone information.

    Returns
    -------
    Array or Index

    Raises
    ------
    TypeError
        If Datetime Array/Index is tz-naive.

    See Also
    --------
    DatetimeIndex.tz : A timezone that has a variable offset from UTC.
    DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
        given time zone, or remove timezone from a tz-aware DatetimeIndex.

    Examples
    --------
    With the `tz` parameter, we can change the DatetimeIndex
    to other time zones:

    >>> dti = pd.date_range(start='2014-08-01 09:00',
    ...                     freq='H', periods=3, tz='Europe/Berlin')

    >>> dti
    DatetimeIndex(['2014-08-01 09:00:00+02:00',
                   '2014-08-01 10:00:00+02:00',
                   '2014-08-01 11:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Berlin]', freq='H')

    >>> dti.tz_convert('US/Central')
    DatetimeIndex(['2014-08-01 02:00:00-05:00',
                   '2014-08-01 03:00:00-05:00',
                   '2014-08-01 04:00:00-05:00'],
                  dtype='datetime64[ns, US/Central]', freq='H')

    With the ``tz=None``, we can remove the timezone (after converting
    to UTC if necessary):

    >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
    ...                     periods=3, tz='Europe/Berlin')

    >>> dti
    DatetimeIndex(['2014-08-01 09:00:00+02:00',
                   '2014-08-01 10:00:00+02:00',
                   '2014-08-01 11:00:00+02:00'],
                  dtype='datetime64[ns, Europe/Berlin]', freq='H')

    >>> dti.tz_convert(None)
    DatetimeIndex(['2014-08-01 07:00:00',
                   '2014-08-01 08:00:00',
                   '2014-08-01 09:00:00'],
                  dtype='datetime64[ns]', freq='H')
    """
    tz = timezones.maybe_get_tz(tz)

    if self.tz is None:
        # tz naive, use tz_localize
        raise TypeError('Cannot convert tz-naive timestamps, use '
                        'tz_localize to localize')

    # No conversion since timestamps are all UTC to begin with
    dtype = tz_to_dtype(tz)
    return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)

def tz_localize(self, tz, ambiguous='raise', nonexistent='raise',
                errors=None):
    """
    Localize tz-naive Datetime Array/Index to tz-aware Datetime
    Array/Index.

    This method takes a time zone (tz) naive Datetime Array/Index object
    and makes this time zone aware. It does not move the time to another
    time zone.
    Time zone localization helps to switch from time zone aware to time
    zone unaware objects.

    Parameters
    ----------
    tz : str, pytz.timezone, dateutil.tz.tzfile or None
        Time zone to convert timestamps to. Passing ``None`` will
        remove the time zone information preserving local time.
    ambiguous : 'infer', 'NaT', bool array, default 'raise'
        When clocks moved backward due to DST, ambiguous times may arise.
        For example in Central European Time (UTC+01), when going from
        03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
        00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
        `ambiguous` parameter dictates how ambiguous times should be
        handled.

        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False signifies a
          non-DST time (note that this flag is only applicable for
          ambiguous times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous
          times

    nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
        A nonexistent time does not exist in a particular timezone
        where clocks moved forward due to DST.

        - 'shift_forward' will shift the nonexistent time forward to the
          closest existing time
        - 'shift_backward' will shift the nonexistent time backward to the
          closest existing time
        - 'NaT' will return NaT where there are nonexistent times
        - timedelta objects will shift nonexistent times by the timedelta
        - 'raise' will raise a NonExistentTimeError if there are
          nonexistent times

        .. versionadded:: 0.24.0

    errors : {'raise', 'coerce'}, default None
        - 'raise' will raise a NonExistentTimeError if a timestamp is not
          valid in the specified time zone (e.g. due to a transition from
          or to DST time). Use ``nonexistent='raise'`` instead.
        - 'coerce' will return NaT if the timestamp can not be converted
          to the specified time zone. Use ``nonexistent='NaT'`` instead.

        .. deprecated:: 0.24.0

    Returns
    -------
    Same type as self
        Array/Index converted to the specified time zone.

    Raises
    ------
    TypeError
        If the Datetime Array/Index is tz-aware and tz is not None.

    See Also
    --------
    DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
        one time zone to another.

    Examples
    --------
    >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
    >>> tz_naive
    DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                   '2018-03-03 09:00:00'],
                  dtype='datetime64[ns]', freq='D')

    Localize DatetimeIndex in US/Eastern time zone:

    >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
    >>> tz_aware
    DatetimeIndex(['2018-03-01 09:00:00-05:00',
                   '2018-03-02 09:00:00-05:00',
                   '2018-03-03 09:00:00-05:00'],
                  dtype='datetime64[ns, US/Eastern]', freq='D')

    With the ``tz=None``, we can remove the time zone information
    while keeping the local time (not converted to UTC):

    >>> tz_aware.tz_localize(None)
    DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
                   '2018-03-03 09:00:00'],
                  dtype='datetime64[ns]', freq='D')

    Be careful with DST changes. When there is sequential data, pandas
    can infer the DST time:

    >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
    ...                               '2018-10-28 02:00:00',
    ...                               '2018-10-28 02:30:00',
    ...                               '2018-10-28 02:00:00',
    ...                               '2018-10-28 02:30:00',
    ...                               '2018-10-28 03:00:00',
    ...                               '2018-10-28 03:30:00']))
    >>> s.dt.tz_localize('CET', ambiguous='infer')
    0   2018-10-28 01:30:00+02:00
    1   2018-10-28 02:00:00+02:00
    2   2018-10-28 02:30:00+02:00
    3   2018-10-28 02:00:00+01:00
    4   2018-10-28 02:30:00+01:00
    5   2018-10-28 03:00:00+01:00
    6   2018-10-28 03:30:00+01:00
    dtype: datetime64[ns, CET]

    In some cases, inferring the DST is impossible. In such cases, you can
    pass an ndarray to the ambiguous parameter to set the DST explicitly

    >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
    ...                               '2018-10-28 02:36:00',
    ...                               '2018-10-28 03:46:00']))
    >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
    0   2018-10-28 01:20:00+02:00
    1   2018-10-28 02:36:00+02:00
    2   2018-10-28 03:46:00+01:00
    dtype: datetime64[ns, CET]

    If the DST transition causes nonexistent times, you can shift these
    dates forward or backwards with a timedelta object or
    `'shift_forward'` or `'shift_backward'`.

    >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
    ...                               '2015-03-29 03:30:00']))
    >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
    0   2015-03-29 03:00:00+02:00
    1   2015-03-29 03:30:00+02:00
    dtype: datetime64[ns, Europe/Warsaw]

    >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
    0   2015-03-29 01:59:59.999999999+01:00
    1   2015-03-29 03:30:00+02:00
    dtype: datetime64[ns, Europe/Warsaw]

    >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
    0   2015-03-29 03:30:00+02:00
    1   2015-03-29 03:30:00+02:00
    dtype: datetime64[ns, Europe/Warsaw]
    """
    if errors is not None:
        warnings.warn("The errors argument is deprecated and will be "
                      "removed in a future release. Use "
                      "nonexistent='NaT' or nonexistent='raise' "
                      "instead.", FutureWarning)
        if errors == 'coerce':
            nonexistent = 'NaT'
        elif errors == 'raise':
            nonexistent = 'raise'
        else:
            raise ValueError("The errors argument must be either 'coerce' "
                             "or 'raise'.")

    nonexistent_options = ('raise', 'NaT', 'shift_forward',
                           'shift_backward')
    if nonexistent not in nonexistent_options and not isinstance(
            nonexistent, timedelta):
        raise ValueError("The nonexistent argument must be one of 'raise',"
                         " 'NaT', 'shift_forward', 'shift_backward' or"
                         " a timedelta object")

    if self.tz is not None:
        if tz is None:
            new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC,
                                                self.tz)
        else:
            raise TypeError("Already tz-aware, use tz_convert to convert.")
    else:
        tz = timezones.maybe_get_tz(tz)
        # Convert to UTC
        new_dates = conversion.tz_localize_to_utc(
            self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent,
        )
    new_dates = new_dates.view(_NS_DTYPE)
    dtype = tz_to_dtype(tz)
    return self._simple_new(new_dates, dtype=dtype, freq=self.freq)

def normalize(self):
    """
    Convert times to midnight.

    The time component of the date-time is converted to midnight i.e.
    00:00:00. This is useful in cases when the time does not matter.
    Length is unaltered. The timezones are unaffected.

    This method is available on Series with datetime values under
    the ``.dt`` accessor, and directly on Datetime Array/Index.

    Returns
    -------
    DatetimeArray, DatetimeIndex or Series
        The same type as the original data. Series will have the same
        name and index. DatetimeIndex will have the same name.

    See Also
    --------
    floor : Floor the datetimes to the specified freq.
    ceil : Ceil the datetimes to the specified freq.
    round : Round the datetimes to the specified freq.

    Examples
    --------
    >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
    ...                     periods=3, tz='Asia/Calcutta')
    >>> idx
    DatetimeIndex(['2014-08-01 10:00:00+05:30',
                   '2014-08-01 11:00:00+05:30',
                   '2014-08-01 12:00:00+05:30'],
                  dtype='datetime64[ns, Asia/Calcutta]', freq='H')
    >>> idx.normalize()
    DatetimeIndex(['2014-08-01 00:00:00+05:30',
                   '2014-08-01 00:00:00+05:30',
                   '2014-08-01 00:00:00+05:30'],
                  dtype='datetime64[ns, Asia/Calcutta]', freq=None)
    """
    if self.tz is None or timezones.is_utc(self.tz):
        not_null = ~self.isna()
        DAY_NS = ccalendar.DAY_SECONDS * 1000000000
        new_values = self.asi8.copy()
        adjustment = (new_values[not_null] % DAY_NS)
        new_values[not_null] = new_values[not_null] - adjustment
    else:
        new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
    return type(self)._from_sequence(new_values,
                                     freq='infer').tz_localize(self.tz)

def to_period(self, freq=None):
    """
    Cast to PeriodArray/Index at a particular frequency.

    Converts DatetimeArray/Index to PeriodArray/Index.

    Parameters
    ----------
    freq : str or Offset, optional
        One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
        or an Offset object. Will be inferred by default.

    Returns
    -------
    PeriodArray/Index

    Raises
    ------
    ValueError
        When converting a DatetimeArray/Index with non-regular values,
        so that a frequency cannot be inferred.

    See Also
    --------
    PeriodIndex: Immutable ndarray holding ordinal values.
    DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.

    Examples
    --------
    >>> df = pd.DataFrame({"y": [1, 2, 3]},
    ...                   index=pd.to_datetime(["2000-03-31 00:00:00",
    ...                                         "2000-05-31 00:00:00",
    ...                                         "2000-08-31 00:00:00"]))
    >>> df.index.to_period("M")
    PeriodIndex(['2000-03', '2000-05', '2000-08'],
                dtype='period[M]', freq='M')

    Infer the daily frequency

    >>> idx = pd.date_range("2017-01-01", periods=2)
    >>> idx.to_period()
    PeriodIndex(['2017-01-01', '2017-01-02'],
                dtype='period[D]', freq='D')
    """
    from pandas.core.arrays import PeriodArray

    if self.tz is not None:
        warnings.warn("Converting to PeriodArray/Index representation "
                      "will drop timezone information.", UserWarning)

    if freq is None:
        freq = self.freqstr or self.inferred_freq

        if freq is None:
            raise ValueError("You must pass a freq argument as "
                             "current index has none.")

        freq = get_period_alias(freq)

    return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)

def to_perioddelta(self, freq):
    """
    Calculate TimedeltaArray of difference between index values and index
    converted to PeriodArray at specified freq. Used for vectorized
    offsets

    Parameters
    ----------
    freq : Period frequency

    Returns
    -------
    TimedeltaArray/Index
    """
    # TODO: consider privatizing (discussion in GH#23113)
    from pandas.core.arrays.timedeltas import TimedeltaArray
    i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
    m8delta = i8delta.view('m8[ns]')
    return TimedeltaArray(m8delta)

def month_name(self, locale=None):
    """
    Return the month names of the DatetimeIndex with specified locale.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    locale : str, optional
        Locale determining the language in which to return the month name.
        Default is English locale.

    Returns
    -------
    Index
        Index of month names.

    Examples
    --------
    >>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
    >>> idx
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                  dtype='datetime64[ns]', freq='M')
    >>> idx.month_name()
    Index(['January', 'February', 'March'], dtype='object')
    """
    if self.tz is not None and not timezones.is_utc(self.tz):
        values = self._local_timestamps()
    else:
        values = self.asi8

    result = fields.get_date_name_field(values, 'month_name',
                                        locale=locale)
    result = self._maybe_mask_results(result, fill_value=None)
    return result

def time(self):
    """
    Returns numpy array of datetime.time. The time part of the Timestamps.
    """
    # If the Timestamps have a timezone that is not UTC,
    # convert them into their i8 representation while
    # keeping their timezone and not using UTC
    if self.tz is not None and not timezones.is_utc(self.tz):
        timestamps = self._local_timestamps()
    else:
        timestamps = self.asi8

    return tslib.ints_to_pydatetime(timestamps, box="time")

def to_julian_date(self):
    """
    Convert Datetime Array to float64 ndarray of Julian Dates.
    0 Julian date is noon January 1, 4713 BC.
    http://en.wikipedia.org/wiki/Julian_day
    """
    # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
    year = np.asarray(self.year)
    month = np.asarray(self.month)
    day = np.asarray(self.day)
    testarr = month < 3
    year[testarr] -= 1
    month[testarr] += 12
    return (day +
            np.fix((153 * month - 457) / 5) +
            365 * year +
            np.floor(year / 4) -
            np.floor(year / 100) +
            np.floor(year / 400) +
            1721118.5 +
            (self.hour +
             self.minute / 60.0 +
             self.second / 3600.0 +
             self.microsecond / 3600.0 / 1e+6 +
             self.nanosecond / 3600.0 / 1e+9
             ) / 24.0)

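A quick sanity check of the formula through the public DatetimeIndex method: noon on 2000-01-01 UTC is the J2000 epoch, Julian Date 2451545.0 exactly.

import pandas as pd

idx = pd.DatetimeIndex(['2000-01-01 12:00:00'])
print(idx.to_julian_date()[0])  # 2451545.0
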
def get_api_items(api_doc_fd):
    """
    Yield information about all public API items.

    Parse api.rst file from the documentation, and extract all the
    functions, methods, classes, attributes... This should include all
    pandas public API.

    Parameters
    ----------
    api_doc_fd : file descriptor
        A file descriptor of the API documentation page, containing the
        table of contents with all the public API.

    Yields
    ------
    name : str
        The name of the object (e.g. 'pandas.Series.str.upper').
    func : function
        The object itself. In most cases this will be a function or method,
        but it can also be classes, properties, cython objects...
    section : str
        The name of the section in the API page where the object item is
        located.
    subsection : str
        The name of the subsection in the API page where the object item
        is located.
    """
    current_module = 'pandas'
    previous_line = current_section = current_subsection = ''
    position = None
    for line in api_doc_fd:
        line = line.strip()
        if len(line) == len(previous_line):
            if set(line) == set('-'):
                current_section = previous_line
                continue
            if set(line) == set('~'):
                current_subsection = previous_line
                continue

        if line.startswith('.. currentmodule::'):
            current_module = line.replace('.. currentmodule::', '').strip()
            continue

        if line == '.. autosummary::':
            position = 'autosummary'
            continue

        if position == 'autosummary':
            if line == '':
                position = 'items'
                continue

        if position == 'items':
            if line == '':
                position = None
                continue
            item = line.strip()
            func = importlib.import_module(current_module)
            for part in item.split('.'):
                func = getattr(func, part)

            yield ('.'.join([current_module, item]), func,
                   current_section, current_subsection)

        previous_line = line

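A minimal sketch of the RST layout this parser expects, fed through an in-memory file object; section and subsection underlines must match the length of their title line, and items live under an .. autosummary:: directive. Assumes get_api_items above is importable and pandas is installed:

import io

rst = io.StringIO("""\
Series
------

Constructor
~~~~~~~~~~~

.. currentmodule:: pandas

.. autosummary::

   Series
""")

for name, obj, section, subsection in get_api_items(rst):
    print(name, section, subsection)  # pandas.Series Series Constructor
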
def get_validation_data(doc):
    """
    Validate the docstring.

    Parameters
    ----------
    doc : Docstring
        A Docstring object with the given function name.

    Returns
    -------
    tuple
        errors : list of tuple
            Errors occurred during validation.
        warnings : list of tuple
            Warnings occurred during validation.
        examples_errs : str
            Examples usage displayed along the error, otherwise empty
            string.

    Notes
    -----
    The error codes are defined as:
    - First two characters: Section where the error happens:
       * GL: Global (no section, like section ordering errors)
       * SS: Short summary
       * ES: Extended summary
       * PR: Parameters
       * RT: Returns
       * YD: Yields
       * RS: Raises
       * WN: Warns
       * SA: See Also
       * NT: Notes
       * RF: References
       * EX: Examples
    - Last two characters: Numeric error code inside the section

    For example, EX02 is the second codified error in the Examples section
    (which in this case is assigned to examples that do not pass the
    tests).

    The error codes, their corresponding error messages, and the details
    on how they are validated, are not documented more than in the source
    code of this function.
    """
    errs = []
    wrns = []
    if not doc.raw_doc:
        errs.append(error('GL08'))
        return errs, wrns, ''

    if doc.start_blank_lines != 1:
        errs.append(error('GL01'))
    if doc.end_blank_lines != 1:
        errs.append(error('GL02'))
    if doc.double_blank_lines:
        errs.append(error('GL03'))

    mentioned_errs = doc.mentioned_private_classes
    if mentioned_errs:
        errs.append(error('GL04',
                          mentioned_private_classes=', '
                          .join(mentioned_errs)))

    for line in doc.raw_doc.splitlines():
        if re.match("^ *\t", line):
            errs.append(error('GL05', line_with_tabs=line.lstrip()))

    unexpected_sections = [section for section in doc.section_titles
                           if section not in ALLOWED_SECTIONS]
    for section in unexpected_sections:
        errs.append(error('GL06',
                          section=section,
                          allowed_sections=', '.join(ALLOWED_SECTIONS)))

    correct_order = [section for section in ALLOWED_SECTIONS
                     if section in doc.section_titles]
    if correct_order != doc.section_titles:
        errs.append(error('GL07',
                          correct_sections=', '.join(correct_order)))

    if (doc.deprecated_with_directive
            and not doc.extended_summary.startswith('.. deprecated:: ')):
        errs.append(error('GL09'))

    if not doc.summary:
        errs.append(error('SS01'))
    else:
        if not doc.summary[0].isupper():
            errs.append(error('SS02'))
        if doc.summary[-1] != '.':
            errs.append(error('SS03'))
        if doc.summary != doc.summary.lstrip():
            errs.append(error('SS04'))
        elif (doc.is_function_or_method
                and doc.summary.split(' ')[0][-1] == 's'):
            errs.append(error('SS05'))
        if doc.num_summary_lines > 1:
            errs.append(error('SS06'))

    if not doc.extended_summary:
        wrns.append(('ES01', 'No extended summary found'))

    # PR01: Parameters not documented
    # PR02: Unknown parameters
    # PR03: Wrong parameters order
    errs += doc.parameter_mismatches

    for param in doc.doc_parameters:
        if not param.startswith("*"):  # Check can ignore var / kwargs
            if not doc.parameter_type(param):
                if ':' in param:
                    errs.append(error('PR10',
                                      param_name=param.split(':')[0]))
                else:
                    errs.append(error('PR04', param_name=param))
            else:
                if doc.parameter_type(param)[-1] == '.':
                    errs.append(error('PR05', param_name=param))
                common_type_errors = [('integer', 'int'),
                                      ('boolean', 'bool'),
                                      ('string', 'str')]
                for wrong_type, right_type in common_type_errors:
                    if wrong_type in doc.parameter_type(param):
                        errs.append(error('PR06',
                                          param_name=param,
                                          right_type=right_type,
                                          wrong_type=wrong_type))
        if not doc.parameter_desc(param):
            errs.append(error('PR07', param_name=param))
        else:
            if not doc.parameter_desc(param)[0].isupper():
                errs.append(error('PR08', param_name=param))
            if doc.parameter_desc(param)[-1] != '.':
                errs.append(error('PR09', param_name=param))

    if doc.is_function_or_method:
        if not doc.returns:
            if doc.method_returns_something:
                errs.append(error('RT01'))
        else:
            if len(doc.returns) == 1 and doc.returns[0].name:
                errs.append(error('RT02'))
            for name_or_type, type_, desc in doc.returns:
                if not desc:
                    errs.append(error('RT03'))
                else:
                    desc = ' '.join(desc)
                    if not desc[0].isupper():
                        errs.append(error('RT04'))
                    if not desc.endswith('.'):
                        errs.append(error('RT05'))

        if not doc.yields and 'yield' in doc.method_source:
            errs.append(error('YD01'))

    if not doc.see_also:
        wrns.append(error('SA01'))
    else:
        for rel_name, rel_desc in doc.see_also.items():
            if rel_desc:
                if not rel_desc.endswith('.'):
                    errs.append(error('SA02', reference_name=rel_name))
                if not rel_desc[0].isupper():
                    errs.append(error('SA03', reference_name=rel_name))
            else:
                errs.append(error('SA04', reference_name=rel_name))
            if rel_name.startswith('pandas.'):
                errs.append(error('SA05',
                                  reference_name=rel_name,
                                  right_reference=rel_name[len('pandas.'):]))

    examples_errs = ''
    if not doc.examples:
        wrns.append(error('EX01'))
    else:
        examples_errs = doc.examples_errors
        if examples_errs:
            errs.append(error('EX02', doctest_log=examples_errs))
        for err in doc.validate_pep8():
            errs.append(error('EX03',
                              error_code=err.error_code,
                              error_message=err.message,
                              times_happening=' ({} times)'
                                              .format(err.count)
                                              if err.count > 1 else ''))
        examples_source_code = ''.join(doc.examples_source_code)
        for wrong_import in ('numpy', 'pandas'):
            if 'import {}'.format(wrong_import) in examples_source_code:
                errs.append(error('EX04', imported_library=wrong_import))

    return errs, wrns, examples_errs

def validate_one(func_name):
    """
    Validate the docstring for the given func_name.

    Parameters
    ----------
    func_name : str
        Name of the function whose docstring will be evaluated
        (e.g. pandas.read_csv).

    Returns
    -------
    dict
        A dictionary containing all the information obtained from
        validating the docstring.
    """
    doc = Docstring(func_name)

    errs, wrns, examples_errs = get_validation_data(doc)

    return {'type': doc.type,
            'docstring': doc.clean_doc,
            'deprecated': doc.deprecated,
            'file': doc.source_file_name,
            'file_line': doc.source_file_def_line,
            'github_link': doc.github_url,
            'errors': errs,
            'warnings': wrns,
            'examples_errors': examples_errs}

def validate_all(prefix, ignore_deprecated=False):
    """
    Execute the validation of all docstrings, and return a dict with the
    results.

    Parameters
    ----------
    prefix : str or None
        If provided, only the docstrings that start with this pattern will
        be validated. If None, all docstrings will be validated.
    ignore_deprecated : bool, default False
        If True, deprecated objects are ignored when validating docstrings.

    Returns
    -------
    dict
        A dictionary with an item for every function/method... containing
        all the validation information.
    """
    result = {}
    seen = {}

    # functions from the API docs
    api_doc_fnames = os.path.join(
        BASE_PATH, 'doc', 'source', 'reference', '*.rst')
    api_items = []
    for api_doc_fname in glob.glob(api_doc_fnames):
        with open(api_doc_fname) as f:
            api_items += list(get_api_items(f))
    for func_name, func_obj, section, subsection in api_items:
        if prefix and not func_name.startswith(prefix):
            continue
        doc_info = validate_one(func_name)
        if ignore_deprecated and doc_info['deprecated']:
            continue
        result[func_name] = doc_info

        shared_code_key = doc_info['file'], doc_info['file_line']
        shared_code = seen.get(shared_code_key, '')
        result[func_name].update({'in_api': True,
                                  'section': section,
                                  'subsection': subsection,
                                  'shared_code_with': shared_code})

        seen[shared_code_key] = func_name

    # functions from introspecting Series, DataFrame and Panel
    api_item_names = set(list(zip(*api_items))[0])
    for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel):
        for member in inspect.getmembers(class_):
            func_name = 'pandas.{}.{}'.format(class_.__name__, member[0])
            if (not member[0].startswith('_')
                    and func_name not in api_item_names):
                if prefix and not func_name.startswith(prefix):
                    continue
                doc_info = validate_one(func_name)
                if ignore_deprecated and doc_info['deprecated']:
                    continue
                result[func_name] = doc_info
                result[func_name]['in_api'] = False

    return result

def _load_obj(name):
    """
    Import Python object from its name as string.

    Parameters
    ----------
    name : str
        Object name to import (e.g. pandas.Series.str.upper)

    Returns
    -------
    object
        Python object that can be a class, method, function...

    Examples
    --------
    >>> Docstring._load_obj('pandas.Series')
    <class 'pandas.core.series.Series'>
    """
    for maxsplit in range(1, name.count('.') + 1):
        # TODO when py3 only replace by: module, *func_parts = ...
        func_name_split = name.rsplit('.', maxsplit)
        module = func_name_split[0]
        func_parts = func_name_split[1:]
        try:
            obj = importlib.import_module(module)
        except ImportError:
            pass
        else:
            # Stop at the first split whose module part imports; the
            # remaining parts are resolved via getattr below.
            break

    if 'obj' not in locals():
        raise ImportError('No module can be imported '
                          'from "{}"'.format(name))

    for part in func_parts:
        obj = getattr(obj, part)

    return obj

def _to_original_callable(obj):
    """
    Find the Python object that contains the source code of the object.

    This is useful to find the place in the source code (file and line
    number) where a docstring is defined. It does not currently work for
    all cases, but it should help find some (properties...).
    """
    while True:
        if inspect.isfunction(obj) or inspect.isclass(obj):
            f = inspect.getfile(obj)
            if f.startswith('<') and f.endswith('>'):
                return None
            return obj
        if inspect.ismethod(obj):
            obj = obj.__func__
        elif isinstance(obj, functools.partial):
            obj = obj.func
        elif isinstance(obj, property):
            obj = obj.fget
        else:
            return None

def source_file_name(self):
    """
    File name where the object is implemented (e.g. pandas/core/frame.py).
    """
    try:
        fname = inspect.getsourcefile(self.code_obj)
    except TypeError:
        # In some cases the object is something complex like a cython
        # object that can't be easily introspected. And it's better to
        # return the source code file of the object as None, than crash
        pass
    else:
        if fname:
            fname = os.path.relpath(fname, BASE_PATH)
            return fname

def method_returns_something(self):
    """
    Check if the docstring's method can return something.

    Bare returns, returns valued None and returns from nested functions
    are not considered.

    Returns
    -------
    bool
        Whether the docstring's method can return something.
    """

    def get_returns_not_on_nested_functions(node):
        returns = [node] if isinstance(node, ast.Return) else []
        for child in ast.iter_child_nodes(node):
            # Ignore nested functions and its subtrees.
            if not isinstance(child, ast.FunctionDef):
                child_returns = get_returns_not_on_nested_functions(child)
                returns.extend(child_returns)
        return returns

    tree = ast.parse(self.method_source).body
    if tree:
        returns = get_returns_not_on_nested_functions(tree[0])
        return_values = [r.value for r in returns]
        # Replace NameConstant nodes valued None for None.
        for i, v in enumerate(return_values):
            if isinstance(v, ast.NameConstant) and v.value is None:
                return_values[i] = None
        return any(return_values)
    else:
        return False

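A standalone sketch of the same AST walk for experimenting outside the Docstring class; note ast.NameConstant is the pre-Python-3.8 node type, which newer interpreters still accept in isinstance checks:

import ast

def returns_something(source):
    def walk(node):
        found = [node] if isinstance(node, ast.Return) else []
        for child in ast.iter_child_nodes(node):
            if not isinstance(child, ast.FunctionDef):  # skip nested defs
                found.extend(walk(child))
        return found

    tree = ast.parse(source).body
    if not tree:
        return False
    values = [r.value for r in walk(tree[0])]
    # Bare `return` (value is None) and `return None` do not count.
    return any(v is not None
               and not (isinstance(v, ast.NameConstant)
                        and v.value is None)
               for v in values)

print(returns_something("def f():\n    return\n"))    # False
print(returns_something("def f():\n    return 1\n"))  # True
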
def _value_with_fmt(self, val):
    """Convert numpy types to Python types for the Excel writers.

    Parameters
    ----------
    val : object
        Value to be written into cells

    Returns
    -------
    Tuple with the first element being the converted
    value and the second being an optional format
    """
    fmt = None

    if is_integer(val):
        val = int(val)
    elif is_float(val):
        val = float(val)
    elif is_bool(val):
        val = bool(val)
    elif isinstance(val, datetime):
        fmt = self.datetime_format
    elif isinstance(val, date):
        fmt = self.date_format
    elif isinstance(val, timedelta):
        val = val.total_seconds() / float(86400)
        fmt = '0'
    else:
        val = compat.to_str(val)

    return val, fmt

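A quick check of the timedelta branch above: Excel represents durations as fractions of a day, hence the division by 86400 seconds and the '0' number format.

from datetime import timedelta

td = timedelta(hours=36)
print(td.total_seconds() / float(86400))  # 1.5
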
def check_extension(cls, ext):
    """
    Checks the path's extension against the Writer's supported
    extensions. If it isn't supported, raises ValueError.
    """
    if ext.startswith('.'):
        ext = ext[1:]
    if not any(ext in extension for extension in cls.supported_extensions):
        msg = ("Invalid extension for engine '{engine}': '{ext}'"
               .format(engine=pprint_thing(cls.engine),
                       ext=pprint_thing(ext)))
        raise ValueError(msg)
    else:
        return True

def parse(self,
          sheet_name=0,
          header=0,
          names=None,
          index_col=None,
          usecols=None,
          squeeze=False,
          converters=None,
          true_values=None,
          false_values=None,
          skiprows=None,
          nrows=None,
          na_values=None,
          parse_dates=False,
          date_parser=None,
          thousands=None,
          comment=None,
          skipfooter=0,
          convert_float=True,
          mangle_dupe_cols=True,
          **kwds):
    """
    Parse specified sheet(s) into a DataFrame.

    Equivalent to read_excel(ExcelFile, ...)  See the read_excel
    docstring for more info on accepted parameters.
    """
    # Can't use _deprecate_kwarg since sheetname=None has a special meaning
    if is_integer(sheet_name) and sheet_name == 0 and 'sheetname' in kwds:
        warnings.warn("The `sheetname` keyword is deprecated, use "
                      "`sheet_name` instead", FutureWarning, stacklevel=2)
        sheet_name = kwds.pop("sheetname")
    elif 'sheetname' in kwds:
        raise TypeError("Cannot specify both `sheet_name` "
                        "and `sheetname`. Use just `sheet_name`")

    if 'chunksize' in kwds:
        raise NotImplementedError("chunksize keyword of read_excel "
                                  "is not implemented")

    return self._reader.parse(sheet_name=sheet_name,
                              header=header,
                              names=names,
                              index_col=index_col,
                              usecols=usecols,
                              squeeze=squeeze,
                              converters=converters,
                              true_values=true_values,
                              false_values=false_values,
                              skiprows=skiprows,
                              nrows=nrows,
                              na_values=na_values,
                              parse_dates=parse_dates,
                              date_parser=date_parser,
                              thousands=thousands,
                              comment=comment,
                              skipfooter=skipfooter,
                              convert_float=convert_float,
                              mangle_dupe_cols=mangle_dupe_cols,
                              **kwds)

def _validate_where(w):
    """
    Validate that the where statement is of the right type.

    The type may either be String, Expr, or list-like of Exprs.

    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.

    Returns
    -------
    where : The original where clause if the check was successful.

    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    if not (isinstance(w, (Expr, str)) or is_list_like(w)):
        raise TypeError("where must be passed as a string, Expr, "
                        "or list-like of Exprs")

    return w

def maybe_expression(s):
    """ loose checking if s is a pytables-acceptable expression """
    if not isinstance(s, str):
        return False
    ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)

    # make sure we have an op at least
    return any(op in s for op in ops)

def conform(self, rhs):
    """ inplace conform rhs """
    if not is_list_like(rhs):
        rhs = [rhs]
    if isinstance(rhs, np.ndarray):
        rhs = rhs.ravel()
    return rhs

def generate(self, v):
    """ create and return the op string for this TermValue """
    val = v.tostring(self.encoding)
    return "({lhs} {op} {val})".format(lhs=self.lhs,
                                       op=self.op,
                                       val=val)

def convert_value(self, v):
    """ convert the expression that is in the term to something that is
        accepted by pytables """

    def stringify(value):
        if self.encoding is not None:
            encoder = partial(pprint_thing_encoded,
                              encoding=self.encoding)
        else:
            encoder = pprint_thing
        return encoder(value)

    kind = _ensure_decoded(self.kind)
    meta = _ensure_decoded(self.meta)
    if kind == 'datetime64' or kind == 'datetime':
        if isinstance(v, (int, float)):
            v = stringify(v)
        v = _ensure_decoded(v)
        v = Timestamp(v)
        if v.tz is not None:
            v = v.tz_convert('UTC')
        return TermValue(v, v.value, kind)
    elif kind == 'timedelta64' or kind == 'timedelta':
        v = Timedelta(v, unit='s').value
        return TermValue(int(v), v, kind)
    elif meta == 'category':
        metadata = com.values_from_object(self.metadata)
        result = metadata.searchsorted(v, side='left')

        # result returns 0 if v is first element or if v is not in metadata
        # check that metadata contains v
        if not result and v not in metadata:
            result = -1
        return TermValue(result, result, 'integer')
    elif kind == 'integer':
        v = int(float(v))
        return TermValue(v, v, kind)
    elif kind == 'float':
        v = float(v)
        return TermValue(v, v, kind)
    elif kind == 'bool':
        if isinstance(v, str):
            v = v.strip().lower() not in ['false', 'f', 'no', 'n',
                                          'none', '0', '[]', '{}', '']
        else:
            v = bool(v)
        return TermValue(v, v, kind)
    elif isinstance(v, str):
        # string quoting
        return TermValue(v, stringify(v), 'string')
    else:
        raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
                        .format(v=v, typ=type(v), kind=kind))

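A standalone sketch of the bool branch's string handling above: every string in the falsy list maps to False, anything else to True.

falsy = ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', '']

for s in ('False', 'yes', '0', ''):
    print(repr(s), s.strip().lower() not in falsy)
# 'False' False / 'yes' True / '0' False / '' False
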
invert the filter def invert(self): """ invert the filter """ if self.filter is not None: f = list(self.filter) f[1] = self.generate_filter_op(invert=True) self.filter = tuple(f) return self
create and return the numexpr condition and filter def evaluate(self): """ create and return the numexpr condition and filter """ try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid condition".format(expr=self.expr, slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid filter".format(expr=self.expr, slf=self)) return self.condition, self.filter
quote the string if not encoded, else return the already-encoded value def tostring(self, encoding): """ quote the string if not encoded, else return the already-encoded value """ if self.kind == 'string': if encoding is not None: return self.converted return '"{converted}"'.format(converted=self.converted) elif self.kind == 'float': # python 2 str(float) is not always # round-trippable so use repr() return repr(self.converted) return self.converted
if we have bytes, decode them to unicode def _ensure_decoded(s): """ if we have bytes, decode them to unicode """ if isinstance(s, (np.bytes_, bytes)): s = s.decode(pd.get_option('display.encoding')) return s
wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) argument limit def _result_type_many(*arrays_and_dtypes): """ wrapper around numpy.result_type which overcomes the NPY_MAXARGS (32) argument limit """ try: return np.result_type(*arrays_and_dtypes) except ValueError: # we have > NPY_MAXARGS terms in our expression return reduce(np.result_type, arrays_and_dtypes)
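A quick demonstration of the limit being worked around: np.result_type refuses more than NPY_MAXARGS (32) operands, while a pairwise reduce does not:

import numpy as np
from functools import reduce

dtypes = [np.dtype('int8')] * 40
# np.result_type(*dtypes) raises "ValueError: too many arguments"
assert reduce(np.result_type, dtypes) == np.dtype('int8')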
If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean def validate_argmin_with_skipna(skipna, args, kwargs): """ If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """ skipna, args = process_skipna(skipna, args) validate_argmin(args, kwargs) return skipna
If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean def validate_argmax_with_skipna(skipna, args, kwargs): """ If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """ skipna, args = process_skipna(skipna, args) validate_argmax(args, kwargs) return skipna
If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean def validate_argsort_with_ascending(ascending, args, kwargs): """ If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean """ if is_integer(ascending) or ascending is None: args = (ascending,) + args ascending = True validate_argsort_kind(args, kwargs, max_fname_arg_count=3) return ascending
If 'NDFrame.clip' is called via the numpy library, the third parameter in its signature is 'out', which can takes an ndarray, so check if the 'axis' parameter is an instance of ndarray, since 'axis' itself should either be an integer or None def validate_clip_with_axis(axis, args, kwargs): """ If 'NDFrame.clip' is called via the numpy library, the third parameter in its signature is 'out', which can takes an ndarray, so check if the 'axis' parameter is an instance of ndarray, since 'axis' itself should either be an integer or None """ if isinstance(axis, ndarray): args = (axis,) + args axis = None validate_clip(args, kwargs) return axis
If this function is called via the 'numpy' library, the third parameter in its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so check if the 'skipna' parameter is a boolean or not def validate_cum_func_with_skipna(skipna, args, kwargs, name): """ If this function is called via the 'numpy' library, the third parameter in its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so check if the 'skipna' parameter is a boolean or not """ if not is_bool(skipna): args = (skipna,) + args skipna = True validate_cum_func(args, kwargs, fname=name) return skipna
If this function is called via the 'numpy' library, the third parameter in its signature is 'axis', which takes either an ndarray or 'None', so check if the 'convert' parameter is either an instance of ndarray or is None def validate_take_with_convert(convert, args, kwargs): """ If this function is called via the 'numpy' library, the third parameter in its signature is 'axis', which takes either an ndarray or 'None', so check if the 'convert' parameter is either an instance of ndarray or is None """ if isinstance(convert, ndarray) or convert is None: args = (convert,) + args convert = True validate_take(args, kwargs, max_fname_arg_count=3, method='both') return convert
'args' and 'kwargs' should be empty, except for allowed kwargs because all of their necessary parameters are explicitly listed in the function signature def validate_groupby_func(name, args, kwargs, allowed=None): """ 'args' and 'kwargs' should be empty, except for allowed kwargs because all of their necessary parameters are explicitly listed in the function signature """ if allowed is None: allowed = [] kwargs = set(kwargs) - set(allowed) if len(args) + len(kwargs) > 0: raise UnsupportedFunctionCall(( "numpy operations are not valid " "with groupby. Use .groupby(...)." "{func}() instead".format(func=name)))
'args' and 'kwargs' should be empty because all of their necessary parameters are explicitly listed in the function signature def validate_resampler_func(method, args, kwargs): """ 'args' and 'kwargs' should be empty because all of their necessary parameters are explicitly listed in the function signature """ if len(args) + len(kwargs) > 0: if method in RESAMPLER_NUMPY_OPS: raise UnsupportedFunctionCall(( "numpy operations are not valid " "with resample. Use .resample(...)." "{func}() instead".format(func=method))) else: raise TypeError("too many arguments passed in")
Ensure that the axis argument passed to min, max, argmin, or argmax is zero or None, as otherwise it will be incorrectly ignored. Parameters ---------- axis : int or None Raises ------ ValueError def validate_minmax_axis(axis): """ Ensure that the axis argument passed to min, max, argmin, or argmax is zero or None, as otherwise it will be incorrectly ignored. Parameters ---------- axis : int or None Raises ------ ValueError """ ndim = 1 # hard-coded for Index if axis is None: return if axis >= ndim or (axis < 0 and ndim + axis < 0): raise ValueError("`axis` must be less than the number of " "dimensions ({ndim})".format(ndim=ndim))
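For example, when numpy dispatches back into pandas (a sketch; the behavior assumes the hard-coded ndim of 1 above):

import numpy as np
import pandas as pd

idx = pd.Index([3, 1, 2])
np.argmin(idx)              # fine: axis defaults to None
np.argmin(idx, axis=0)      # fine: axis 0 is the only real axis
# np.argmin(idx, axis=1) raises ValueError, since axis 1 would otherwise
# be silently ignored on a 1-dimensional Index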
msgpack (serialize) object to input file path THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path_or_buf : string File path, buffer-like, or None if None, return generated string args : an object or objects to serialize encoding : encoding for unicode objects append : boolean whether to append to an existing msgpack (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) def to_msgpack(path_or_buf, *args, **kwargs): """ msgpack (serialize) object to input file path THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path_or_buf : string File path, buffer-like, or None if None, return generated string args : an object or objects to serialize encoding : encoding for unicode objects append : boolean whether to append to an existing msgpack (default is False) compress : type of compressor (zlib or blosc), default to None (no compression) """ global compressor compressor = kwargs.pop('compress', None) append = kwargs.pop('append', None) if append: mode = 'a+b' else: mode = 'wb' def writer(fh): for a in args: fh.write(pack(a, **kwargs)) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): with open(path_or_buf, mode) as fh: writer(fh) elif path_or_buf is None: buf = BytesIO() writer(buf) return buf.getvalue() else: writer(path_or_buf)
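A minimal write-side sketch; 'objs.msg' is a placeholder path. Passing several objects packs them back-to-back in one stream, and append=True adds to an existing file:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
s = pd.Series([3, 4])
pd.to_msgpack('objs.msg', df, s)            # pack two objects into one file
pd.to_msgpack('objs.msg', df, append=True)  # append a third
packed = pd.to_msgpack(None, df)            # path None -> return the bytes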
Load msgpack pandas object from the specified file path THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path_or_buf : string File path, BytesIO like or string encoding : Encoding for decoding msgpack str type iterator : boolean, if True, return an iterator to the unpacker (default is False) Returns ------- obj : same type as object stored in file def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs): """ Load msgpack pandas object from the specified file path THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path_or_buf : string File path, BytesIO like or string encoding : Encoding for decoding msgpack str type iterator : boolean, if True, return an iterator to the unpacker (default is False) Returns ------- obj : same type as object stored in file """ path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf) if iterator: return Iterator(path_or_buf) def read(fh): unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs)) if len(unpacked_obj) == 1: return unpacked_obj[0] if should_close: try: path_or_buf.close() except IOError: pass return unpacked_obj # see if we have an actual file if isinstance(path_or_buf, str): try: exists = os.path.exists(path_or_buf) except (TypeError, ValueError): exists = False if exists: with open(path_or_buf, 'rb') as fh: return read(fh) if isinstance(path_or_buf, bytes): # treat as a binary-like fh = None try: fh = BytesIO(path_or_buf) return read(fh) finally: if fh is not None: fh.close() elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read): # treat as a buffer like return read(path_or_buf) raise ValueError('path_or_buf needs to be a string file path or file-like')
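And the read side, continuing the sketch above: a stream holding a single object is returned directly, one holding several comes back as a list, and iterator=True unpacks lazily:

import pandas as pd

df = pd.read_msgpack(packed)          # 'packed' from the previous sketch
objs = pd.read_msgpack('objs.msg')    # multiple objects -> a list
for obj in pd.read_msgpack('objs.msg', iterator=True):
    print(type(obj))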
return my dtype mapping, whether number or name def dtype_for(t): """ return my dtype mapping, whether number or name """ if t in dtype_dict: return dtype_dict[t] return np.typeDict.get(t, t)
Convert strings to complex number instance with specified numpy type. def c2f(r, i, ctype_name): """ Convert strings to complex number instance with specified numpy type. """ ftype = c2f_dict[ctype_name] return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
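For example, assuming c2f_dict maps 'complex128' to np.float64:

>>> c2f('1.0', '-2.5', 'complex128')
(1-2.5j)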
convert the numpy values to a list def convert(values): """ convert the numpy values to a list """ dtype = values.dtype if is_categorical_dtype(values): return values elif is_object_dtype(dtype): return values.ravel().tolist() if needs_i8_conversion(dtype): values = values.view('i8') v = values.ravel() if compressor == 'zlib': _check_zlib() # return string arrays like they are if dtype == np.object_: return v.tolist() # convert to a bytes array v = v.tostring() return ExtType(0, zlib.compress(v)) elif compressor == 'blosc': _check_blosc() # return string arrays like they are if dtype == np.object_: return v.tolist() # convert to a bytes array v = v.tostring() return ExtType(0, blosc.compress(v, typesize=dtype.itemsize)) # ndarray (on original dtype) return ExtType(0, v.tostring())
Data encoder def encode(obj): """ Data encoder """ tobj = type(obj) if isinstance(obj, Index): if isinstance(obj, RangeIndex): return {'typ': 'range_index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'start': getattr(obj, '_start', None), 'stop': getattr(obj, '_stop', None), 'step': getattr(obj, '_step', None)} elif isinstance(obj, PeriodIndex): return {'typ': 'period_index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'freq': getattr(obj, 'freqstr', None), 'dtype': obj.dtype.name, 'data': convert(obj.asi8), 'compress': compressor} elif isinstance(obj, DatetimeIndex): tz = getattr(obj, 'tz', None) # store tz info and data as UTC if tz is not None: tz = tz.zone obj = obj.tz_convert('UTC') return {'typ': 'datetime_index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'dtype': obj.dtype.name, 'data': convert(obj.asi8), 'freq': getattr(obj, 'freqstr', None), 'tz': tz, 'compress': compressor} elif isinstance(obj, (IntervalIndex, IntervalArray)): if isinstance(obj, IntervalIndex): typ = 'interval_index' else: typ = 'interval_array' return {'typ': typ, 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'left': getattr(obj, 'left', None), 'right': getattr(obj, 'right', None), 'closed': getattr(obj, 'closed', None)} elif isinstance(obj, MultiIndex): return {'typ': 'multi_index', 'klass': obj.__class__.__name__, 'names': getattr(obj, 'names', None), 'dtype': obj.dtype.name, 'data': convert(obj.values), 'compress': compressor} else: return {'typ': 'index', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'dtype': obj.dtype.name, 'data': convert(obj.values), 'compress': compressor} elif isinstance(obj, Categorical): return {'typ': 'category', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'codes': obj.codes, 'categories': obj.categories, 'ordered': obj.ordered, 'compress': compressor} elif isinstance(obj, Series): if isinstance(obj, SparseSeries): raise NotImplementedError( 'msgpack sparse series is not implemented' ) # d = {'typ': 'sparse_series', # 'klass': obj.__class__.__name__, # 'dtype': obj.dtype.name, # 'index': obj.index, # 'sp_index': obj.sp_index, # 'sp_values': convert(obj.sp_values), # 'compress': compressor} # for f in ['name', 'fill_value', 'kind']: # d[f] = getattr(obj, f, None) # return d else: return {'typ': 'series', 'klass': obj.__class__.__name__, 'name': getattr(obj, 'name', None), 'index': obj.index, 'dtype': obj.dtype.name, 'data': convert(obj.values), 'compress': compressor} elif issubclass(tobj, NDFrame): if isinstance(obj, SparseDataFrame): raise NotImplementedError( 'msgpack sparse frame is not implemented' ) # d = {'typ': 'sparse_dataframe', # 'klass': obj.__class__.__name__, # 'columns': obj.columns} # for f in ['default_fill_value', 'default_kind']: # d[f] = getattr(obj, f, None) # d['data'] = dict([(name, ss) # for name, ss in obj.items()]) # return d else: data = obj._data if not data.is_consolidated(): data = data.consolidate() # the block manager return {'typ': 'block_manager', 'klass': obj.__class__.__name__, 'axes': data.axes, 'blocks': [{'locs': b.mgr_locs.as_array, 'values': convert(b.values), 'shape': b.values.shape, 'dtype': b.dtype.name, 'klass': b.__class__.__name__, 'compress': compressor} for b in data.blocks] } elif isinstance(obj, (datetime, date, np.datetime64, timedelta, np.timedelta64)) or obj is NaT: if isinstance(obj, Timestamp): tz = obj.tzinfo if tz is not None: tz = tz.zone freq = obj.freq if freq is not None: freq = freq.freqstr return {'typ': 'timestamp', 'value': obj.value, 'freq': freq, 'tz': tz} if obj is NaT: return {'typ': 'nat'} elif isinstance(obj, np.timedelta64): return {'typ': 'timedelta64', 'data': obj.view('i8')} elif isinstance(obj, timedelta): return {'typ': 'timedelta', 'data': (obj.days, obj.seconds, obj.microseconds)} elif isinstance(obj, np.datetime64): return {'typ': 'datetime64', 'data': str(obj)} elif isinstance(obj, datetime): return {'typ': 'datetime', 'data': obj.isoformat()} elif isinstance(obj, date): return {'typ': 'date', 'data': obj.isoformat()} raise Exception( "cannot encode this datetimelike object: {obj}".format(obj=obj)) elif isinstance(obj, Period): return {'typ': 'period', 'ordinal': obj.ordinal, 'freq': obj.freqstr} elif isinstance(obj, Interval): return {'typ': 'interval', 'left': obj.left, 'right': obj.right, 'closed': obj.closed} elif isinstance(obj, BlockIndex): return {'typ': 'block_index', 'klass': obj.__class__.__name__, 'blocs': obj.blocs, 'blengths': obj.blengths, 'length': obj.length} elif isinstance(obj, IntIndex): return {'typ': 'int_index', 'klass': obj.__class__.__name__, 'indices': obj.indices, 'length': obj.length} elif isinstance(obj, np.ndarray): return {'typ': 'ndarray', 'shape': obj.shape, 'ndim': obj.ndim, 'dtype': obj.dtype.name, 'data': convert(obj), 'compress': compressor} elif isinstance(obj, np.number): if np.iscomplexobj(obj): return {'typ': 'np_scalar', 'sub_typ': 'np_complex', 'dtype': obj.dtype.name, 'real': obj.real.__repr__(), 'imag': obj.imag.__repr__()} else: return {'typ': 'np_scalar', 'dtype': obj.dtype.name, 'data': obj.__repr__()} elif isinstance(obj, complex): return {'typ': 'np_complex', 'real': obj.real.__repr__(), 'imag': obj.imag.__repr__()} return obj
Decoder for deserializing numpy data types. def decode(obj): """ Decoder for deserializing numpy data types. """ typ = obj.get('typ') if typ is None: return obj elif typ == 'timestamp': freq = obj['freq'] if 'freq' in obj else obj['offset'] return Timestamp(obj['value'], tz=obj['tz'], freq=freq) elif typ == 'nat': return NaT elif typ == 'period': return Period(ordinal=obj['ordinal'], freq=obj['freq']) elif typ == 'index': dtype = dtype_for(obj['dtype']) data = unconvert(obj['data'], dtype, obj.get('compress')) return Index(data, dtype=dtype, name=obj['name']) elif typ == 'range_index': return RangeIndex(obj['start'], obj['stop'], obj['step'], name=obj['name']) elif typ == 'multi_index': dtype = dtype_for(obj['dtype']) data = unconvert(obj['data'], dtype, obj.get('compress')) data = [tuple(x) for x in data] return MultiIndex.from_tuples(data, names=obj['names']) elif typ == 'period_index': data = unconvert(obj['data'], np.int64, obj.get('compress')) d = dict(name=obj['name'], freq=obj['freq']) freq = d.pop('freq', None) return PeriodIndex(PeriodArray(data, freq), **d) elif typ == 'datetime_index': data = unconvert(obj['data'], np.int64, obj.get('compress')) d = dict(name=obj['name'], freq=obj['freq']) result = DatetimeIndex(data, **d) tz = obj['tz'] # reverse tz conversion if tz is not None: result = result.tz_localize('UTC').tz_convert(tz) return result elif typ in ('interval_index', 'interval_array'): return globals()[obj['klass']].from_arrays(obj['left'], obj['right'], obj['closed'], name=obj['name']) elif typ == 'category': from_codes = globals()[obj['klass']].from_codes return from_codes(codes=obj['codes'], categories=obj['categories'], ordered=obj['ordered']) elif typ == 'interval': return Interval(obj['left'], obj['right'], obj['closed']) elif typ == 'series': dtype = dtype_for(obj['dtype']) pd_dtype = pandas_dtype(dtype) index = obj['index'] result = Series(unconvert(obj['data'], dtype, obj['compress']), index=index, dtype=pd_dtype, name=obj['name']) return result elif typ == 'block_manager': axes = obj['axes'] def create_block(b): values = _safe_reshape(unconvert( b['values'], dtype_for(b['dtype']), b['compress']), b['shape']) # locs handles duplicate column names, and should be used instead # of items; see GH 9618 if 'locs' in b: placement = b['locs'] else: placement = axes[0].get_indexer(b['items']) if is_datetime64tz_dtype(b['dtype']): assert isinstance(values, np.ndarray), type(values) assert values.dtype == 'M8[ns]', values.dtype values = DatetimeArray(values, dtype=b['dtype']) return make_block(values=values, klass=getattr(internals, b['klass']), placement=placement, dtype=b['dtype']) blocks = [create_block(b) for b in obj['blocks']] return globals()[obj['klass']](BlockManager(blocks, axes)) elif typ == 'datetime': return parse(obj['data']) elif typ == 'datetime64': return np.datetime64(parse(obj['data'])) elif typ == 'date': return parse(obj['data']).date() elif typ == 'timedelta': return timedelta(*obj['data']) elif typ == 'timedelta64': return np.timedelta64(int(obj['data'])) # elif typ == 'sparse_series': # dtype = dtype_for(obj['dtype']) # return SparseSeries( # unconvert(obj['sp_values'], dtype, obj['compress']), # sparse_index=obj['sp_index'], index=obj['index'], # fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name']) # elif typ == 'sparse_dataframe': # return SparseDataFrame( # obj['data'], columns=obj['columns'], # default_fill_value=obj['default_fill_value'], # default_kind=obj['default_kind'] # ) # elif typ == 'sparse_panel': # return SparsePanel( # obj['data'], items=obj['items'], # default_fill_value=obj['default_fill_value'], # default_kind=obj['default_kind']) elif typ == 'block_index': return globals()[obj['klass']](obj['length'], obj['blocs'], obj['blengths']) elif typ == 'int_index': return globals()[obj['klass']](obj['length'], obj['indices']) elif typ == 'ndarray': return unconvert(obj['data'], np.typeDict[obj['dtype']], obj.get('compress')).reshape(obj['shape']) elif typ == 'np_scalar': if obj.get('sub_typ') == 'np_complex': return c2f(obj['real'], obj['imag'], obj['dtype']) else: dtype = dtype_for(obj['dtype']) try: return dtype(obj['data']) except (ValueError, TypeError): return dtype.type(obj['data']) elif typ == 'np_complex': return complex(obj['real'] + '+' + obj['imag'] + 'j') elif isinstance(obj, (dict, list, set)): return obj else: return obj
Pack an object and return the packed bytes. def pack(o, default=encode, encoding='utf-8', unicode_errors='strict', use_single_float=False, autoreset=1, use_bin_type=1): """ Pack an object and return the packed bytes. """ return Packer(default=default, encoding=encoding, unicode_errors=unicode_errors, use_single_float=use_single_float, autoreset=autoreset, use_bin_type=use_bin_type).pack(o)
Unpack a packed object, return an iterator Note: packed lists will be returned as tuples def unpack(packed, object_hook=decode, list_hook=None, use_list=False, encoding='utf-8', unicode_errors='strict', object_pairs_hook=None, max_buffer_size=0, ext_hook=ExtType): """ Unpack a packed object, return an iterator Note: packed lists will be returned as tuples """ return Unpacker(packed, object_hook=object_hook, list_hook=list_hook, use_list=use_list, encoding=encoding, unicode_errors=unicode_errors, object_pairs_hook=object_pairs_hook, max_buffer_size=max_buffer_size, ext_hook=ext_hook)
Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON string or file-like, default: None The string could be a URL. Valid URL schemes include http, ftp, s3, gcs, and file. For file URLs, a host is expected. For instance, a local file could be ``file://localhost/path/to/table.json`` orient : string, Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{index -> [index], columns -> [columns], data -> [values]}`` - ``'records'`` : list like ``[{column -> value}, ... , {column -> value}]`` - ``'index'`` : dict like ``{index -> {column -> value}}`` - ``'columns'`` : dict like ``{column -> {index -> value}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{'split','records','index'}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{'split','records','index', 'columns','values', 'table'}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. .. versionadded:: 0.23.0 'table' as an allowed value for the ``orient`` argument typ : type of object to recover (series or frame), default 'frame' dtype : boolean or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_axes : boolean, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_dates : boolean, default True List of columns to parse for dates; If True, then try to parse datelike columns default is True; a column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'`` keep_default_dates : boolean, default True If parsing dates, then parse the default datelike columns numpy : boolean, default False Direct decoding to numpy arrays. Supports numeric data only, but non-numeric column and index labels are supported. Note also that the JSON ordering MUST be the same for each term if numpy=True. precise_float : boolean, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality date_unit : string, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. .. versionadded:: 0.19.0 lines : boolean, default False Read the file as a json object per line. .. versionadded:: 0.19.0 chunksize : integer, default None Return JsonReader object for iteration. See the `line-delimited json docs <http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionadded:: 0.21.0 compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buf is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. .. versionadded:: 0.21.0 Returns ------- result : Series or DataFrame, depending on the value of `typ`. See Also -------- DataFrame.to_json Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '{"columns":["col 1","col 2"], "index":["row 1","row 2"], "data":[["a","b"],["c","d"]]}' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '{"schema": {"fields": [{"name": "index", "type": "string"}, {"name": "col 1", "type": "string"}, {"name": "col 2", "type": "string"}], "primaryKey": "index", "pandas_version": "0.20.0"}, "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, convert_axes=None, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, lines=False, chunksize=None, compression='infer'): """ Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON string or file-like, default: None The string could be a URL. Valid URL schemes include http, ftp, s3, gcs, and file. For file URLs, a host is expected. For instance, a local file could be ``file://localhost/path/to/table.json`` orient : string, Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{index -> [index], columns -> [columns], data -> [values]}`` - ``'records'`` : list like ``[{column -> value}, ... , {column -> value}]`` - ``'index'`` : dict like ``{index -> {column -> value}}`` - ``'columns'`` : dict like ``{column -> {index -> value}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{'split','records','index'}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{'split','records','index', 'columns','values', 'table'}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. .. versionadded:: 0.23.0 'table' as an allowed value for the ``orient`` argument typ : type of object to recover (series or frame), default 'frame' dtype : boolean or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_axes : boolean, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_dates : boolean, default True List of columns to parse for dates; If True, then try to parse datelike columns default is True; a column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'`` keep_default_dates : boolean, default True If parsing dates, then parse the default datelike columns numpy : boolean, default False Direct decoding to numpy arrays. Supports numeric data only, but non-numeric column and index labels are supported. Note also that the JSON ordering MUST be the same for each term if numpy=True. precise_float : boolean, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality date_unit : string, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. .. versionadded:: 0.19.0 lines : boolean, default False Read the file as a json object per line. .. versionadded:: 0.19.0 chunksize : integer, default None Return JsonReader object for iteration. See the `line-delimited json docs <http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionadded:: 0.21.0 compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buf is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. .. versionadded:: 0.21.0 Returns ------- result : Series or DataFrame, depending on the value of `typ`. See Also -------- DataFrame.to_json Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '{"columns":["col 1","col 2"], "index":["row 1","row 2"], "data":[["a","b"],["c","d"]]}' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '{"schema": {"fields": [{"name": "index", "type": "string"}, {"name": "col 1", "type": "string"}, {"name": "col 2", "type": "string"}], "primaryKey": "index", "pandas_version": "0.20.0"}, "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """ if orient == 'table' and dtype: raise ValueError("cannot pass both dtype and orient='table'") if orient == 'table' and convert_axes: raise ValueError("cannot pass both convert_axes and orient='table'") if dtype is None and orient != 'table': dtype = True if convert_axes is None and orient != 'table': convert_axes = True compression = _infer_compression(path_or_buf, compression) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, ) json_reader = JsonReader( filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, numpy=numpy, precise_float=precise_float, date_unit=date_unit, encoding=encoding, lines=lines, chunksize=chunksize, compression=compression, ) if chunksize: return json_reader result = json_reader.read() if should_close: try: filepath_or_buffer.close() except: # noqa: flake8 pass return result
Try to format axes if they are datelike. def _format_axes(self): """ Try to format axes if they are datelike. """ if not self.obj.index.is_unique and self.orient in ( 'index', 'columns'): raise ValueError("DataFrame index must be unique for orient=" "'{orient}'.".format(orient=self.orient)) if not self.obj.columns.is_unique and self.orient in ( 'index', 'columns', 'records'): raise ValueError("DataFrame columns must be unique for orient=" "'{orient}'.".format(orient=self.orient))
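This is the check that fires on round-trip-hostile frames, for example:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]}, index=['x', 'x'])
df.to_json(orient='records')   # fine: 'records' drops the index entirely
df.to_json(orient='index')     # raises ValueError: DataFrame index must be
                               # unique for orient='index'.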
At this point, the data either has a `read` attribute (e.g. a file object or a StringIO) or is a string that is a JSON document. If self.chunksize, we prepare the data for the `__next__` method. Otherwise, we read it into memory for the `read` method. def _preprocess_data(self, data): """ At this point, the data either has a `read` attribute (e.g. a file object or a StringIO) or is a string that is a JSON document. If self.chunksize, we prepare the data for the `__next__` method. Otherwise, we read it into memory for the `read` method. """ if hasattr(data, 'read') and not self.chunksize: data = data.read() if not hasattr(data, 'read') and self.chunksize: data = StringIO(data) return data
The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. def _get_data_from_filepath(self, filepath_or_buffer): """ The function read_json accepts three input types: 1. filepath (string-like) 2. file-like object (e.g. open file object, StringIO) 3. JSON string This method turns (1) into (2) to simplify the rest of the processing. It returns input types (2) and (3) unchanged. """ data = filepath_or_buffer exists = False if isinstance(data, str): try: exists = os.path.exists(filepath_or_buffer) # gh-5874: if the filepath is too long will raise here except (TypeError, ValueError): pass if exists or self.compression is not None: data, _ = _get_handle(filepath_or_buffer, 'r', encoding=self.encoding, compression=self.compression) self.should_close = True self.open_stream = data return data
Combines a list of JSON objects into one JSON array string. def _combine_lines(self, lines): """ Combines a list of JSON objects into one JSON array string. """ lines = filter(None, map(lambda x: x.strip(), lines)) return '[' + ','.join(lines) + ']'
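The transformation in isolation, restated outside the class:

lines = ['{"a": 1}', '', ' {"a": 2} ']
combined = '[' + ','.join(filter(None, (x.strip() for x in lines))) + ']'
assert combined == '[{"a": 1},{"a": 2}]'   # blank lines are dropped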
Read the whole JSON input into a pandas object. def read(self): """ Read the whole JSON input into a pandas object. """ if self.lines and self.chunksize: obj = concat(self) elif self.lines: data = to_str(self.data) obj = self._get_object_parser( self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) self.close() return obj
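From the user's side, this is what drives chunked line-delimited reading; the path below is a placeholder:

import pandas as pd

reader = pd.read_json('records.jsonl', lines=True, chunksize=1000)
for chunk in reader:          # each chunk is a DataFrame of up to 1000 rows
    print(len(chunk))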
Parses a json document into a pandas object. def _get_object_parser(self, json): """ Parses a json document into a pandas object. """ typ = self.typ dtype = self.dtype kwargs = { "orient": self.orient, "dtype": self.dtype, "convert_axes": self.convert_axes, "convert_dates": self.convert_dates, "keep_default_dates": self.keep_default_dates, "numpy": self.numpy, "precise_float": self.precise_float, "date_unit": self.date_unit } obj = None if typ == 'frame': obj = FrameParser(json, **kwargs).parse() if typ == 'series' or obj is None: if not isinstance(dtype, bool): kwargs['dtype'] = dtype obj = SeriesParser(json, **kwargs).parse() return obj
Checks that dict has only the appropriate keys for orient='split'. def check_keys_split(self, decoded): """ Checks that dict has only the appropriate keys for orient='split'. """ bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: bad_keys = ", ".join(bad_keys) raise ValueError("JSON data had unexpected key(s): {bad_keys}" .format(bad_keys=pprint_thing(bad_keys)))
Try to convert axes. def _convert_axes(self): """ Try to convert axes. """ for axis in self.obj._AXIS_NUMBERS.keys(): new_axis, result = self._try_convert_data( axis, self.obj._get_axis(axis), use_dtypes=False, convert_dates=True) if result: setattr(self.obj, axis, new_axis)
Take a conversion function and possibly recreate the frame. def _process_converter(self, f, filt=None): """ Take a conversion function and possibly recreate the frame. """ if filt is None: filt = lambda col, c: True needs_new_obj = False new_obj = dict() for i, (col, c) in enumerate(self.obj.iteritems()): if filt(col, c): new_data, result = f(col, c) if result: c = new_data needs_new_obj = True new_obj[i] = c if needs_new_obj: # possibly handle dup columns new_obj = DataFrame(new_obj, index=self.obj.index) new_obj.columns = self.obj.columns self.obj = new_obj
Format an array for printing. Parameters ---------- values formatter float_format na_rep digits space justify decimal leading_space : bool, optional Whether the array should be formatted with a leading space. When an array as a column of a Series or DataFrame, we do want the leading space to pad between columns. When formatting an Index subclass (e.g. IntervalIndex._format_native_types), we don't want the leading space since it should be left-aligned. Returns ------- List[str] def format_array(values, formatter, float_format=None, na_rep='NaN', digits=None, space=None, justify='right', decimal='.', leading_space=None): """ Format an array for printing. Parameters ---------- values formatter float_format na_rep digits space justify decimal leading_space : bool, optional Whether the array should be formatted with a leading space. When an array as a column of a Series or DataFrame, we do want the leading space to pad between columns. When formatting an Index subclass (e.g. IntervalIndex._format_native_types), we don't want the leading space since it should be left-aligned. Returns ------- List[str] """ if is_datetime64_dtype(values.dtype): fmt_klass = Datetime64Formatter elif is_datetime64tz_dtype(values): fmt_klass = Datetime64TZFormatter elif is_timedelta64_dtype(values.dtype): fmt_klass = Timedelta64Formatter elif is_extension_array_dtype(values.dtype): fmt_klass = ExtensionArrayFormatter elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype): fmt_klass = FloatArrayFormatter elif is_integer_dtype(values.dtype): fmt_klass = IntArrayFormatter else: fmt_klass = GenericArrayFormatter if space is None: space = get_option("display.column_space") if float_format is None: float_format = get_option("display.float_format") if digits is None: digits = get_option("display.precision") fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep, float_format=float_format, formatter=formatter, space=space, justify=justify, decimal=decimal, leading_space=leading_space) return fmt_obj.get_result()
Outputs rounded and formatted percentiles. Parameters ---------- percentiles : list-like, containing floats from interval [0,1] Returns ------- formatted : list of strings Notes ----- Rounding precision is chosen so that: (1) if any two elements of ``percentiles`` differ, they remain different after rounding (2) no entry is *rounded* to 0% or 100%. Any non-integer is always rounded to at least 1 decimal place. Examples -------- Keeps all entries different after rounding: >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] No element is rounded to 0% or 100% (unless already equal to it). Duplicates are allowed: >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] def format_percentiles(percentiles): """ Outputs rounded and formatted percentiles. Parameters ---------- percentiles : list-like, containing floats from interval [0,1] Returns ------- formatted : list of strings Notes ----- Rounding precision is chosen so that: (1) if any two elements of ``percentiles`` differ, they remain different after rounding (2) no entry is *rounded* to 0% or 100%. Any non-integer is always rounded to at least 1 decimal place. Examples -------- Keeps all entries different after rounding: >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] No element is rounded to 0% or 100% (unless already equal to it). Duplicates are allowed: >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] """ percentiles = np.asarray(percentiles) # It checks for np.NaN as well with np.errstate(invalid='ignore'): if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \ or not np.all(percentiles <= 1): raise ValueError("percentiles should all be in the interval [0,1]") percentiles = 100 * percentiles int_idx = (percentiles.astype(int) == percentiles) if np.all(int_idx): out = percentiles.astype(int).astype(str) return [i + '%' for i in out] unique_pcts = np.unique(percentiles) to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None # Least precision that keeps percentiles unique after rounding prec = -np.floor(np.log10(np.min( np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end) ))).astype(int) prec = max(1, prec) out = np.empty_like(percentiles, dtype=object) out[int_idx] = percentiles[int_idx].astype(int).astype(str) out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) return [i + '%' for i in out]
Return a formatter function for a range of timedeltas. These will all have the same format argument If box, then show the return in quotes def _get_format_timedelta64(values, nat_rep='NaT', box=False): """ Return a formatter function for a range of timedeltas. These will all have the same format argument If box, then show the return in quotes """ values_int = values.astype(np.int64) consider_values = values_int != iNaT one_day_nanos = (86400 * 1e9) even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 all_sub_day = np.logical_and( consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0 if even_days: format = None elif all_sub_day: format = 'sub_day' else: format = 'long' def _formatter(x): if x is None or (is_scalar(x) and isna(x)): return nat_rep if not isinstance(x, Timedelta): x = Timedelta(x) result = x._repr_base(format=format) if box: result = "'{res}'".format(res=result) return result return _formatter
Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those. def _trim_zeros_complex(str_complexes, na_rep='NaN'): """ Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those. """ def separate_and_trim(str_complex, na_rep): num_arr = str_complex.split('+') return (_trim_zeros_float([num_arr[0]], na_rep) + ['+'] + _trim_zeros_float([num_arr[1][:-1]], na_rep) + ['j']) return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
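For instance, assuming the a+bj layout the splitter expects (a non-negative imaginary part):

>>> _trim_zeros_complex(['1.500+2.000j', '3.250+4.100j'])
['1.5+2.0j', '3.25+4.1j']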
Trims zeros, leaving just one before the decimal points if need be. def _trim_zeros_float(str_floats, na_rep='NaN'): """ Trims zeros, leaving just one before the decimal points if need be. """ trimmed = str_floats def _is_number(x): return (x != na_rep and not x.endswith('inf')) def _cond(values): finite = [x for x in values if _is_number(x)] return (len(finite) > 0 and all(x.endswith('0') for x in finite) and not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. return [x + "0" if x.endswith('.') and _is_number(x) else x for x in trimmed]
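Note the trimming is collective: it stops as soon as any finite value no longer ends in a zero, so a common width is preserved across the column:

>>> _trim_zeros_float(['0.500', '1.000', '2.250'])
['0.50', '1.00', '2.25']
>>> _trim_zeros_float(['1.000', '2.000'])
['1.0', '2.0']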
Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of decimal digits after the decimal point. See also EngFormatter. def set_eng_float_format(accuracy=3, use_eng_prefix=False): """ Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of decimal digits after the decimal point. See also EngFormatter. """ set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9))
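For example (the exact rendering below assumes EngFormatter's SI prefixes):

import pandas as pd

pd.set_eng_float_format(accuracy=2, use_eng_prefix=True)
print(pd.Series([123456.0, 0.000012]))
# 0    123.46k
# 1     12.00u
# dtype: float64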
For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values for each level. sentinel : string, optional Value which signals that no new index starts at that position. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index). def get_level_lengths(levels, sentinel=''): """For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values for each level. sentinel : string, optional Value which signals that no new index starts at that position. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index). """ if len(levels) == 0: return [] control = [True] * len(levels[0]) result = [] for level in levels: last_index = 0 lengths = {} for i, key in enumerate(level): if control[i] and key == sentinel: pass else: control[i] = False lengths[last_index] = i - last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result
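A worked example, with '' marking positions where no new index starts; the outer 'a' spans two rows, so position 0 maps to length 2:

>>> get_level_lengths([['a', '', 'b'], ['x', 'y', 'z']], sentinel='')
[{0: 2, 2: 1}, {0: 1, 1: 1, 2: 1}]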
Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append. def buffer_put_lines(buf, lines): """ Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append. """ if any(isinstance(x, str) for x in lines): lines = [str(x) for x in lines] buf.write('\n'.join(lines))
Calculate display width considering unicode East Asian Width def len(self, text): """ Calculate display width considering unicode East Asian Width """ if not isinstance(text, str): return len(text) return sum(self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text)
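The idea, restated outside the class (assuming the usual _EAW_MAP, which charges the 'W'ide and 'F'ullwidth categories two columns each):

from unicodedata import east_asian_width

text = '日本語abc'
width = sum(2 if east_asian_width(c) in 'WF' else 1 for c in text)
assert width == 9      # three double-width characters plus three ASCII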
Render a DataFrame to a list of columns (as lists of strings). def _to_str_columns(self): """ Render a DataFrame to a list of columns (as lists of strings). """ frame = self.tr_frame # may include levels names also str_index = self._get_formatted_index(frame) if not is_list_like(self.header) and not self.header: stringified = [] for i, c in enumerate(frame): fmt_values = self._format_col(i) fmt_values = _make_fixed_width(fmt_values, self.justify, minimum=(self.col_space or 0), adj=self.adj) stringified.append(fmt_values) else: if is_list_like(self.header): if len(self.header) != len(self.columns): raise ValueError(('Writing {ncols} cols but got {nalias} ' 'aliases' .format(ncols=len(self.columns), nalias=len(self.header)))) str_columns = [[label] for label in self.header] else: str_columns = self._get_formatted_column_labels(frame) if self.show_row_idx_names: for x in str_columns: x.append('') stringified = [] for i, c in enumerate(frame): cheader = str_columns[i] header_colwidth = max(self.col_space or 0, *(self.adj.len(x) for x in cheader)) fmt_values = self._format_col(i) fmt_values = _make_fixed_width(fmt_values, self.justify, minimum=header_colwidth, adj=self.adj) max_len = max(max(self.adj.len(x) for x in fmt_values), header_colwidth) cheader = self.adj.justify(cheader, max_len, mode=self.justify) stringified.append(cheader + fmt_values) strcols = stringified if self.index: strcols.insert(0, str_index) # Add ... to signal truncated truncate_h = self.truncate_h truncate_v = self.truncate_v if truncate_h: col_num = self.tr_col_num strcols.insert(self.tr_col_num + 1, [' ...'] * (len(str_index))) if truncate_v: n_header_rows = len(str_index) - len(frame) row_num = self.tr_row_num for ix, col in enumerate(strcols): # infer from above row cwidth = self.adj.len(strcols[ix][row_num]) is_dot_col = False if truncate_h: is_dot_col = ix == col_num + 1 if cwidth > 3 or is_dot_col: my_str = '...' else: my_str = '..' if ix == 0: dot_mode = 'left' elif is_dot_col: cwidth = 4 dot_mode = 'right' else: dot_mode = 'right' dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0] strcols[ix].insert(row_num + n_header_rows, dot_str) return strcols
Render a DataFrame to a console-friendly tabular output. def to_string(self): """ Render a DataFrame to a console-friendly tabular output. """ from pandas import Series frame = self.frame if len(frame.columns) == 0 or len(frame.index) == 0: info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}' .format(name=type(self.frame).__name__, col=pprint_thing(frame.columns), idx=pprint_thing(frame.index))) text = info_line else: strcols = self._to_str_columns() if self.line_width is None: # no need to wrap around just print # the whole frame text = self.adj.adjoin(1, *strcols) elif (not isinstance(self.max_cols, int) or self.max_cols > 0): # need to wrap around text = self._join_multiline(*strcols) else: # max_cols == 0. Try to fit frame to terminal text = self.adj.adjoin(1, *strcols).split('\n') max_len = Series(text).str.len().max() # plus truncate dot col dif = max_len - self.w # '+ 1' to avoid too wide repr (GH PR #17023) adj_dif = dif + 1 col_lens = Series([Series(ele).apply(len).max() for ele in strcols]) n_cols = len(col_lens) counter = 0 while adj_dif > 0 and n_cols > 1: counter += 1 mid = int(round(n_cols / 2.)) mid_ix = col_lens.index[mid] col_len = col_lens[mid_ix] # adjoin adds one adj_dif -= (col_len + 1) col_lens = col_lens.drop(mid_ix) n_cols = len(col_lens) # subtract index column max_cols_adj = n_cols - self.index # GH-21180. Ensure that we print at least two. max_cols_adj = max(max_cols_adj, 2) self.max_cols_adj = max_cols_adj # Call again _chk_truncate to cut frame appropriately # and then generate string representation self._chk_truncate() strcols = self._to_str_columns() text = self.adj.adjoin(1, *strcols) self.buf.writelines(text) if self.should_show_dimensions: self.buf.write("\n\n[{nrows} rows x {ncols} columns]" .format(nrows=len(frame), ncols=len(frame.columns)))
Render a DataFrame to a LaTeX tabular/longtable environment output. def to_latex(self, column_format=None, longtable=False, encoding=None, multicolumn=False, multicolumn_format=None, multirow=False): """ Render a DataFrame to a LaTeX tabular/longtable environment output. """ from pandas.io.formats.latex import LatexFormatter latex_renderer = LatexFormatter(self, column_format=column_format, longtable=longtable, multicolumn=multicolumn, multicolumn_format=multicolumn_format, multirow=multirow) if encoding is None: encoding = 'utf-8' if hasattr(self.buf, 'write'): latex_renderer.write_result(self.buf) elif isinstance(self.buf, str): import codecs with codecs.open(self.buf, 'w', encoding=encoding) as f: latex_renderer.write_result(f) else: raise TypeError('buf is not a file name and it has no write ' 'method')
Render a DataFrame to an html table. Parameters ---------- classes : str or list-like classes to include in the `class` attribute of the opening ``<table>`` tag, in addition to the default "dataframe". notebook : {True, False}, optional, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening ``<table>`` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 def to_html(self, classes=None, notebook=False, border=None): """ Render a DataFrame to an html table. Parameters ---------- classes : str or list-like classes to include in the `class` attribute of the opening ``<table>`` tag, in addition to the default "dataframe". notebook : {True, False}, optional, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening ``<table>`` tag. Default ``pd.options.html.border``. .. versionadded:: 0.19.0 """ from pandas.io.formats.html import HTMLFormatter, NotebookFormatter Klass = NotebookFormatter if notebook else HTMLFormatter html = Klass(self, classes=classes, border=border).render() if hasattr(self.buf, 'write'): buffer_put_lines(self.buf, html) elif isinstance(self.buf, str): with open(self.buf, 'w') as f: buffer_put_lines(f, html) else: raise TypeError('buf is not a file name and it has no write ' 'method')
Returns a function to be applied on each value to format it def _value_formatter(self, float_format=None, threshold=None): """Returns a function to be applied on each value to format it """ # the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format # we are going to compose different functions, to first convert to # a string, then replace the decimal symbol, and finally chop according # to the threshold # when there is no float_format, we use str instead of '%g' # because str(0.0) = '0.0' while '%g' % 0.0 = '0' if float_format: def base_formatter(v): return float_format(value=v) if notna(v) else self.na_rep else: def base_formatter(v): return str(v) if notna(v) else self.na_rep if self.decimal != '.': def decimal_formatter(v): return base_formatter(v).replace('.', self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notna(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter
Returns the float values converted into strings using the parameters given at initialisation, as a numpy array def get_result_as_array(self): """ Returns the float values converted into strings using the parameters given at initialisation, as a numpy array """ if self.formatter is not None: return np.array([self.formatter(x) for x in self.values]) if self.fixed_width: threshold = get_option("display.chop_threshold") else: threshold = None # if we have a fixed_width, we'll need to try different float_format def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) # default formatter leaves a space to the left when formatting # floats, must be consistent for left-justifying NaNs (GH #25061) if self.justify == 'left': na_rep = ' ' + self.na_rep else: na_rep = self.na_rep # separate the wheat from the chaff values = self.values is_complex = is_complex_dtype(values) mask = isna(values) if hasattr(values, 'to_dense'): # sparse numpy ndarray values = values.to_dense() values = np.array(values, dtype='object') values[mask] = na_rep imask = (~mask).ravel() values.flat[imask] = np.array([formatter(val) for val in values.ravel()[imask]]) if self.fixed_width: if is_complex: return _trim_zeros_complex(values, na_rep) else: return _trim_zeros_float(values, na_rep) return values # There is a special default string when we are fixed-width # The default is otherwise to use str instead of a formatting string if self.float_format is None: if self.fixed_width: float_format = partial('{value: .{digits:d}f}'.format, digits=self.digits) else: float_format = self.float_format else: float_format = lambda value: self.float_format % value formatted_values = format_values_with(float_format) if not self.fixed_width: return formatted_values # we need to convert to scientific format if some values are too small # and would appear as 0, or if some values are too big and take too # much space if len(formatted_values) > 0: maxlen = max(len(x) for x in formatted_values) too_long = maxlen > self.digits + 6 else: too_long = False with np.errstate(invalid='ignore'): abs_vals = np.abs(self.values) # this is pretty arbitrary for now # large values: more than 8 characters including decimal symbol # and first digit, hence > 1e6 has_large_values = (abs_vals > 1e6).any() has_small_values = ((abs_vals < 10**(-self.digits)) & (abs_vals > 0)).any() if has_small_values or (too_long and has_large_values): float_format = partial('{value: .{digits:d}e}'.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values
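A standalone sketch of the fixed-width fallback above: format with ' .Nf' first and, if values would collapse to 0 or grow too wide, reformat with ' .Ne'. digits=6 is an assumed stand-in for the display precision; the function name is illustrative.

import numpy as np

def format_fixed_width(values, digits=6):
    fixed = ['{: .{d}f}'.format(v, d=digits) for v in values]
    too_long = max(len(x) for x in fixed) > digits + 6
    abs_vals = np.abs(values)
    has_small = ((abs_vals < 10 ** -digits) & (abs_vals > 0)).any()
    has_large = (abs_vals > 1e6).any()
    if has_small or (too_long and has_large):
        # exponential notation keeps tiny values from printing as 0.000000
        return ['{: .{d}e}'.format(v, d=digits) for v in values]
    return fixed

print(format_fixed_width(np.array([1.0, 2.5e-9])))
# [' 1.000000e+00', ' 2.500000e-09']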
we by definition DO NOT have a TZ def _format_strings(self): """ we by definition DO NOT have a TZ """ values = self.values if not isinstance(values, DatetimeIndex): values = DatetimeIndex(values) if self.formatter is not None and callable(self.formatter): return [self.formatter(x) for x in values] fmt_values = format_array_from_datetime( values.asi8.ravel(), format=_get_format_datetime64_from_values(values, self.date_format), na_rep=self.nat_rep).reshape(values.shape) return fmt_values.tolist()
we by definition have a TZ def _format_strings(self): """ we by definition have a TZ """ values = self.values.astype(object) is_dates_only = _is_dates_only(values) formatter = (self.formatter or _get_format_datetime64(is_dates_only, date_format=self.date_format)) fmt_values = [formatter(x) for x in values] return fmt_values
Given an Interval or IntervalIndex, return the corresponding interval with closed bounds. def _get_interval_closed_bounds(interval): """ Given an Interval or IntervalIndex, return the corresponding interval with closed bounds. """ left, right = interval.left, interval.right if interval.open_left: left = _get_next_label(left) if interval.open_right: right = _get_prev_label(right) return left, right
helper for interval_range to check if start/end are valid types def _is_valid_endpoint(endpoint): """helper for interval_range to check if start/end are valid types""" return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None])
helper for interval_range to check type compat of start/end/freq def _is_type_compatible(a, b): """helper for interval_range to check type compat of start/end/freq""" is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset)) is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset)) return ((is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or com._any_none(a, b))
Return a fixed frequency IntervalIndex Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals end : numeric or datetime-like, default None Right bound for generating intervals periods : integer, default None Number of periods to generate freq : numeric, string, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : string, default None Name of the resulting IntervalIndex closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- rng : IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` are supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]') def interval_range(start=None, end=None, periods=None, freq=None, name=None, closed='right'): """ Return a fixed frequency IntervalIndex Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals end : numeric or datetime-like, default None Right bound for generating intervals periods : integer, default None Number of periods to generate freq : numeric, string, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. 
Default is 1 for numeric and 'D' for datetime-like. name : string, default None Name of the resulting IntervalIndex closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- rng : IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` are supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. 
>>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]') """ start = com.maybe_box_datetimelike(start) end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com._any_none(periods, start, end): freq = 1 if is_number(endpoint) else 'D' if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and ' 'freq, exactly three must be specified') if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' raise ValueError(msg.format(start=start)) elif not _is_valid_endpoint(end): msg = 'end must be numeric or datetime-like, got {end}' raise ValueError(msg.format(end=end)) if is_float(periods): periods = int(periods) elif not is_integer(periods) and periods is not None: msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError: raise ValueError('freq must be numeric or convertible to ' 'DateOffset, got {freq}'.format(freq=freq)) # verify type compatibility if not all([_is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq)]): raise TypeError("start, end, freq need to be type compatible") # +1 to convert interval count to breaks count (n breaks = n-1 intervals) if periods is not None: periods += 1 if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com._all_not_none(start, end, freq): end -= (end - start) % freq # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com._not_none(start, end, freq)): # np.linspace always produces float output breaks = maybe_downcast_to_dtype(breaks, 'int64') else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): range_func = date_range else: range_func = timedelta_range breaks = range_func(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
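A worked example of the numeric branch above: when start, end, and freq are all given, `end` is snapped down onto the freq grid before the breaks are computed with np.linspace. The variable names mirror the locals in interval_range; this is an illustration, not pandas API.

import numpy as np

start, end, freq = 0, 10, 3
end -= (end - start) % freq               # lower end to 9 so freq divides evenly
periods = int((end - start) // freq) + 1  # 4 breaks -> 3 intervals
breaks = np.linspace(start, end, periods)
print(breaks)                             # [0. 3. 6. 9.]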
Create the writer & save def save(self): """ Create the writer & save """ # GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, 'write'): msg = ("compression has no effect when passing file-like " "object as input.") warnings.warn(msg, RuntimeWarning, stacklevel=2) # determine whether zip compression is being used. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, 'write') and self.compression == 'zip') if is_zip: # zipfile doesn't support writing strings directly to an archive; # use a string buffer to receive the csv output, then dump it into # the zip file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) close = True try: writer_kwargs = dict(lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) if self.encoding == 'ascii': self.writer = csvlib.writer(f, **writer_kwargs) else: writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, 'write'): self.path_or_buf.write(buf) else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close()
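The zip branch above buffers the csv text first because a zipfile archive member cannot be written to incrementally as text. A minimal standalone version of that pattern follows; the file names are illustrative.

import zipfile
from io import StringIO

buf = StringIO()
buf.write('a,b\n1,2\n')                # stand-in for the csv writer output
with zipfile.ZipFile('out.zip', 'w',
                     compression=zipfile.ZIP_DEFLATED) as zf:
    # dump the whole buffered payload into the archive in one call
    zf.writestr('out.csv', buf.getvalue())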
Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object the class to get methods/properties & doc-strings accessors : Sequence[str] List of accessors to add typ : {'property', 'method'} overwrite : boolean, default False overwrite the method/property in the target class if it exists Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...] def delegate_names(delegate, accessors, typ, overwrite=False): """ Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object the class to get methods/properties & doc-strings accessors : Sequence[str] List of accessors to add typ : {'property', 'method'} overwrite : boolean, default False overwrite the method/property in the target class if it exists Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...] """ def add_delegate_accessors(cls): cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite) return cls return add_delegate_accessors
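A self-contained sketch of the same decorator pattern, independent of PandasDelegate: copy named methods from a delegate class onto the decorated class. All names here (delegate, Wrapper, _obj) are illustrative; the inner make(n) closure mirrors how the pandas code avoids late binding of the loop variable.

def delegate(source, names):
    def decorator(cls):
        for name in names:
            def make(n):
                def method(self, *args, **kwargs):
                    return getattr(self._obj, n)(*args, **kwargs)
                method.__name__ = n
                method.__doc__ = getattr(source, n).__doc__
                return method
            setattr(cls, name, make(name))
        return cls
    return decorator

@delegate(str, ['upper', 'strip'])
class Wrapper:
    def __init__(self, obj):
        self._obj = obj

print(Wrapper(' hi ').upper())   # ' HI '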
Add additional __dir__ for this object. def _dir_additions(self): """ Add additional __dir__ for this object. """ rv = set() for accessor in self._accessors: try: getattr(self, accessor) rv.add(accessor) except AttributeError: pass return rv
Add accessors to cls from the delegate class. Parameters ---------- cls : the class to add the methods/properties to delegate : the class to get methods/properties & doc-strings accessors : string list of accessors to add typ : 'property' or 'method' overwrite : boolean, default False overwrite the method/property in the target class if it exists. def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False): """ Add accessors to cls from the delegate class. Parameters ---------- cls : the class to add the methods/properties to delegate : the class to get methods/properties & doc-strings accessors : string list of accessors to add typ : 'property' or 'method' overwrite : boolean, default False overwrite the method/property in the target class if it exists. """ def _create_delegator_property(name): def _getter(self): return self._delegate_property_get(name) def _setter(self, new_values): return self._delegate_property_set(name, new_values) _getter.__name__ = name _setter.__name__ = name return property(fget=_getter, fset=_setter, doc=getattr(delegate, name).__doc__) def _create_delegator_method(name): def f(self, *args, **kwargs): return self._delegate_method(name, *args, **kwargs) f.__name__ = name f.__doc__ = getattr(delegate, name).__doc__ return f for name in accessors: if typ == 'property': f = _create_delegator_property(name) else: f = _create_delegator_method(name) # don't overwrite existing methods/properties if overwrite or not hasattr(cls, name): setattr(cls, name, f)
standard evaluation def _evaluate_standard(op, op_str, a, b, **eval_kwargs): """ standard evaluation """ if _TEST_MODE: _store_test_result(False) with np.errstate(all='ignore'): return op(a, b)
Return a boolean indicating whether we WILL be using numexpr def _can_use_numexpr(op, op_str, a, b, dtype_check): """ Return a boolean indicating whether we WILL be using numexpr """ if op_str is not None: # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: # check for dtype compatibility dtypes = set() for o in [a, b]: if hasattr(o, 'get_dtype_counts'): s = o.get_dtype_counts() if len(s) > 1: return False dtypes |= set(s.index) elif isinstance(o, np.ndarray): dtypes |= {o.dtype.name} # the allowed set of dtypes must be a superset of the operands' dtypes if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: return True return False
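An illustration of the size gate above: numexpr only pays off beyond a minimum element count, so small operands are evaluated in plain numpy. The 10000 threshold here is an assumed stand-in for the module-level _MIN_ELEMENTS, not read from pandas, and worth_numexpr is an illustrative name.

import numpy as np

MIN_ELEMENTS = 10000   # assumed stand-in for _MIN_ELEMENTS

def worth_numexpr(a):
    # same check as above: total element count must exceed the threshold
    return np.prod(a.shape) > MIN_ELEMENTS

print(worth_numexpr(np.ones((100, 100))))   # False: exactly 10000 elements
print(worth_numexpr(np.ones((200, 100))))   # True: 20000 elements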
evaluate and return the expression of the op on a and b Parameters ---------- op : the actual operation to perform (a callable) op_str : the string version of the op a : left operand b : right operand use_numexpr : whether to try to use numexpr (default True) def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs): """ evaluate and return the expression of the op on a and b Parameters ---------- op : the actual operation to perform (a callable) op_str : the string version of the op a : left operand b : right operand use_numexpr : whether to try to use numexpr (default True) """ use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: return _evaluate(op, op_str, a, b, **eval_kwargs) return _evaluate_standard(op, op_str, a, b)
evaluate the where condition cond on a and b Parameters ---------- cond : a boolean array a : return if cond is True b : return if cond is False use_numexpr : whether to try to use numexpr (default True) def where(cond, a, b, use_numexpr=True): """ evaluate the where condition cond on a and b Parameters ---------- cond : a boolean array a : return if cond is True b : return if cond is False use_numexpr : whether to try to use numexpr (default True) """ if use_numexpr: return _where(cond, a, b) return _where_standard(cond, a, b)
writer : string or ExcelWriter object File path or existing ExcelWriter sheet_name : string, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes : tuple of integer (length 2), default None Specifies the one-based bottommost row and rightmost column that is to be frozen engine : string, default None write engine to use if writer is a path - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. def write(self, writer, sheet_name='Sheet1', startrow=0, startcol=0, freeze_panes=None, engine=None): """ writer : string or ExcelWriter object File path or existing ExcelWriter sheet_name : string, default 'Sheet1' Name of sheet which will contain DataFrame startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame freeze_panes : tuple of integer (length 2), default None Specifies the one-based bottommost row and rightmost column that is to be frozen engine : string, default None write engine to use if writer is a path - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. """ from pandas.io.excel import ExcelWriter from pandas.io.common import _stringify_path if isinstance(writer, ExcelWriter): need_save = False else: writer = ExcelWriter(_stringify_path(writer), engine=engine) need_save = True formatted_cells = self.get_formatted_cells() writer.write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes) if need_save: writer.save()
Write a DataFrame to the feather-format Parameters ---------- df : DataFrame path : string file path, or file-like object def to_feather(df, path): """ Write a DataFrame to the feather-format Parameters ---------- df : DataFrame path : string file path, or file-like object """ path = _stringify_path(path) if not isinstance(df, DataFrame): raise ValueError("feather only supports IO with DataFrames") feather = _try_import()[0] valid_types = {'string', 'unicode'} # validate index # -------------- # validate that we have only a default index # raise on anything else as we don't serialize the index if not isinstance(df.index, Int64Index): raise ValueError("feather does not support serializing {} " "for the index; you can .reset_index() " "to make the index into column(s)".format( type(df.index))) if not df.index.equals(RangeIndex.from_range(range(len(df)))): raise ValueError("feather does not support serializing a " "non-default index for the index; you " "can .reset_index() to make the index " "into column(s)") if df.index.name is not None: raise ValueError("feather does not serialize index meta-data on a " "default index") # validate columns # ---------------- # must have value column names (strings only) if df.columns.inferred_type not in valid_types: raise ValueError("feather must have string column names") feather.write_feather(df, path)
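Because of the default-index checks above, a frame with a meaningful index must be reset before writing. A hedged usage sketch (the file name is illustrative, and actually running it requires the optional feather/pyarrow dependency):

import pandas as pd

df = pd.DataFrame({'price': [1.0, 2.0]},
                  index=pd.Index(['a', 'b'], name='ticker'))
# reset_index() turns 'ticker' into a column and leaves a default RangeIndex,
# satisfying to_feather's validation
df.reset_index().to_feather('prices.feather')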
Load a feather-format object from the file path .. versionadded 0.20.0 Parameters ---------- path : string file path, or file-like object columns : sequence, default None If not provided, all columns are read .. versionadded 0.24.0 nthreads : int, default 1 Number of CPU threads to use when reading to pandas.DataFrame .. versionadded 0.21.0 .. deprecated 0.24.0 use_threads : bool, default True Whether to parallelize reading using multiple threads .. versionadded 0.24.0 Returns ------- type of object stored in file def read_feather(path, columns=None, use_threads=True): """ Load a feather-format object from the file path .. versionadded 0.20.0 Parameters ---------- path : string file path, or file-like object columns : sequence, default None If not provided, all columns are read .. versionadded 0.24.0 nthreads : int, default 1 Number of CPU threads to use when reading to pandas.DataFrame .. versionadded 0.21.0 .. deprecated 0.24.0 use_threads : bool, default True Whether to parallelize reading using multiple threads .. versionadded 0.24.0 Returns ------- type of object stored in file """ feather, pyarrow = _try_import() path = _stringify_path(path) if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'): int_use_threads = int(use_threads) if int_use_threads < 1: int_use_threads = 1 return feather.read_feather(path, columns=columns, nthreads=int_use_threads) return feather.read_feather(path, columns=columns, use_threads=bool(use_threads))
Generate a range of dates with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timestamp or None first point of produced date range end : Timestamp or None last point of produced date range periods : int number of periods in produced date range freq : DateOffset describes space between dates in produced date range Returns ------- ndarray[np.int64] representing nanosecond unix timestamps def generate_regular_range(start, end, periods, freq): """ Generate a range of dates with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timestamp or None first point of produced date range end : Timestamp or None last point of produced date range periods : int number of periods in produced date range freq : DateOffset describes space between dates in produced date range Returns ------- ndarray[np.int64] representing nanosecond unix timestamps """ if isinstance(freq, Tick): stride = freq.nanos if periods is None: b = Timestamp(start).value # cannot just use e = Timestamp(end) + 1 because arange breaks when # stride is too large, see GH10887 e = (b + (Timestamp(end).value - b) // stride * stride + stride // 2 + 1) # end.tz == start.tz by this point due to _generate implementation tz = start.tz elif start is not None: b = Timestamp(start).value e = _generate_range_overflow_safe(b, periods, stride, side='start') tz = start.tz elif end is not None: e = Timestamp(end).value + stride b = _generate_range_overflow_safe(e, periods, stride, side='end') tz = end.tz else: raise ValueError("at least 'start' or 'end' should be specified " "if 'periods' is given.") with np.errstate(over="raise"): # If the range is sufficiently large, np.arange may overflow # and incorrectly return an empty array if not caught. try: values = np.arange(b, e, stride, dtype=np.int64) except FloatingPointError: xdr = [b] while xdr[-1] != e: xdr.append(xdr[-1] + stride) values = np.array(xdr[:-1], dtype=np.int64) else: tz = None # start and end should have the same timezone by this point if start is not None: tz = start.tz elif end is not None: tz = end.tz xdr = generate_range(start=start, end=end, periods=periods, offset=freq) values = np.array([x.value for x in xdr], dtype=np.int64) return values, tz
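A worked example of the endpoint arithmetic above: snap `end` onto the stride grid anchored at `b`, then nudge past it so np.arange includes the last point. Plain integers stand in for nanosecond timestamps here.

import numpy as np

b, end, stride = 0, 10, 3
e = b + (end - b) // stride * stride + stride // 2 + 1   # 9 + 1 + 1 = 11
print(np.arange(b, e, stride, dtype=np.int64))           # [0 3 6 9]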
Calculate the second endpoint for passing to np.arange, checking to avoid an integer overflow. Catch OverflowError and re-raise as OutOfBoundsDatetime. Parameters ---------- endpoint : int nanosecond timestamp of the known endpoint of the desired range periods : int number of periods in the desired range stride : int nanoseconds between periods in the desired range side : {'start', 'end'} which end of the range `endpoint` refers to Returns ------- other_end : int Raises ------ OutOfBoundsDatetime def _generate_range_overflow_safe(endpoint, periods, stride, side='start'): """ Calculate the second endpoint for passing to np.arange, checking to avoid an integer overflow. Catch OverflowError and re-raise as OutOfBoundsDatetime. Parameters ---------- endpoint : int nanosecond timestamp of the known endpoint of the desired range periods : int number of periods in the desired range stride : int nanoseconds between periods in the desired range side : {'start', 'end'} which end of the range `endpoint` refers to Returns ------- other_end : int Raises ------ OutOfBoundsDatetime """ # GH#14187 raise instead of incorrectly wrapping around assert side in ['start', 'end'] i64max = np.uint64(np.iinfo(np.int64).max) msg = ('Cannot generate range with {side}={endpoint} and ' 'periods={periods}' .format(side=side, endpoint=endpoint, periods=periods)) with np.errstate(over="raise"): # if periods * stride cannot be multiplied within the *uint64* bounds, # we cannot salvage the operation by recursing, so raise try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) except FloatingPointError: raise OutOfBoundsDatetime(msg) if np.abs(addend) <= i64max: # relatively easy case without casting concerns return _generate_range_overflow_safe_signed( endpoint, periods, stride, side) elif ((endpoint > 0 and side == 'start' and stride > 0) or (endpoint < 0 and side == 'end' and stride > 0)): # no chance of not-overflowing raise OutOfBoundsDatetime(msg) elif (side == 'end' and endpoint > i64max and endpoint - stride <= i64max): # in _generate_regular_range we added `stride` thereby overflowing # the bounds. Adjust to fix this. return _generate_range_overflow_safe(endpoint - stride, periods - 1, stride, side) # split into smaller pieces mid_periods = periods // 2 remaining = periods - mid_periods assert 0 < remaining < periods, (remaining, periods, endpoint, stride) midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side) return _generate_range_overflow_safe(midpoint, remaining, stride, side)
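A demonstration of the overflow guard above: multiplying numpy uint64 scalars under errstate(over="raise") turns a silent wraparound into a catchable FloatingPointError, which the function then re-raises as OutOfBoundsDatetime.

import numpy as np

with np.errstate(over="raise"):
    try:
        # 2**63 * 4 exceeds the uint64 range, so numpy flags an overflow
        np.uint64(2 ** 63) * np.uint64(4)
    except FloatingPointError:
        print("overflow detected; would re-raise as OutOfBoundsDatetime")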