@@ -8,7 +8,6 @@ v0.17.0 (October 9, 2015)
 .. ipython:: python
    :suppress:
 
-   from pandas import *  # noqa F401, F403
 
 
 This is a major release from 0.16.2 and includes a small number of API changes, several new features,
@@ -85,9 +84,9 @@ The new implementation allows for having a single-timezone across all rows, with
 
 .. ipython:: python
 
-   df = DataFrame({'A': date_range('20130101',periods=3),
-                   'B': date_range('20130101',periods=3,tz='US/Eastern'),
-                   'C': date_range('20130101',periods=3,tz='CET')})
+   df = DataFrame({'A': date_range('20130101', periods=3),
+                   'B': date_range('20130101', periods=3, tz='US/Eastern'),
+                   'C': date_range('20130101', periods=3, tz='CET')})
    df
    df.dtypes
 
@@ -112,20 +111,20 @@ This uses a new-dtype representation as well, that is very similar in look-and-f
 
 .. code-block:: ipython
 
-   In [1]: pd.date_range('20130101',periods=3,tz='US/Eastern')
+   In [1]: pd.date_range('20130101', periods=3, tz='US/Eastern')
    Out[1]: DatetimeIndex(['2013-01-01 00:00:00-05:00', '2013-01-02 00:00:00-05:00',
                           '2013-01-03 00:00:00-05:00'],
                          dtype='datetime64[ns]', freq='D', tz='US/Eastern')
 
-   In [2]: pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
+   In [2]: pd.date_range('20130101', periods=3, tz='US/Eastern').dtype
    Out[2]: dtype('<M8[ns]')
 
 New Behavior:
 
 .. ipython:: python
 
-   pd.date_range('20130101',periods=3,tz='US/Eastern')
-   pd.date_range('20130101',periods=3,tz='US/Eastern').dtype
+   pd.date_range('20130101', periods=3, tz='US/Eastern')
+   pd.date_range('20130101', periods=3, tz='US/Eastern').dtype
 
 .. _whatsnew_0170.gil:
 
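As a side note on what the new dtype looks like in practice, here is an illustrative sketch (not part of this commit; it assumes pandas >= 0.17, where the timezone is carried in the dtype itself):

.. code-block:: python

   import pandas as pd

   # A tz-aware column now carries its timezone in the dtype itself.
   s = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
   s.dtype                  # datetime64[ns, US/Eastern]
   s.dt.tz_convert('UTC')   # timezone conversion works column-wide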
@@ -143,8 +142,8 @@ as well as the ``.sum()`` operation.
 
    N = 1000000
    ngroups = 10
-   df = DataFrame({'key': np.random.randint(0,ngroups,size=N),
-                   'data': np.random.randn(N) })
+   df = DataFrame({'key': np.random.randint(0, ngroups, size=N),
+                   'data': np.random.randn(N)})
    df.groupby('key')['data'].sum()
 
 Releasing of the GIL could benefit an application that uses threads for user interactions (e.g. QT_), or performing multi-threaded computations. A nice example of a library that can handle these types of computation-in-parallel is the dask_ library.
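As a rough illustration of the kind of workload that benefits, here is a sketch using a thread pool; the ``concurrent.futures`` wiring is an assumption made for this example and is not part of the commit or the pandas docs:

.. code-block:: python

   from concurrent.futures import ThreadPoolExecutor

   import numpy as np
   import pandas as pd

   N, ngroups = 1000000, 10
   frames = [pd.DataFrame({'key': np.random.randint(0, ngroups, size=N),
                           'data': np.random.randn(N)})
             for _ in range(4)]

   # With the GIL released inside groupby/sum, these calls can overlap
   # on multiple cores instead of serializing on the interpreter lock.
   with ThreadPoolExecutor(max_workers=4) as pool:
       sums = list(pool.map(lambda f: f.groupby('key')['data'].sum(), frames))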
@@ -175,7 +174,7 @@ As a result of this change, these methods are now all discoverable via tab-compl
 .. ipython::
    :verbatim:
 
-   In [15]: df.plot.<TAB>
+   In [15]: df.plot.<TAB>  # noqa: E225, E999
    df.plot.area     df.plot.barh     df.plot.density  df.plot.hist     df.plot.line     df.plot.scatter
    df.plot.bar      df.plot.box      df.plot.hexbin   df.plot.kde      df.plot.pie
 
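For reference, the accessor methods are equivalent to the older ``kind=`` form; a small sketch (illustrative only, assumes matplotlib is installed):

.. code-block:: python

   import numpy as np
   import pandas as pd

   df = pd.DataFrame(np.random.randn(10, 2), columns=['a', 'b'])

   # These two calls draw the same chart; the accessor form is what
   # tab-completion now exposes.
   df.plot(kind='scatter', x='a', y='b')
   df.plot.scatter(x='a', y='b')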
@@ -261,7 +260,7 @@ incrementally.
 
 .. code-block:: python
 
-    for df in pd.read_sas('sas_xport.xpt', chunksize=10000)
+    for df in pd.read_sas('sas_xport.xpt', chunksize=10000):
         do_something(df)
 
 See the :ref:`docs <io.sas>` for more details.
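A possible follow-up usage of the chunked reader, counting rows without loading the whole file; the file name is the same placeholder used above and the snippet is illustrative, not from the commit:

.. code-block:: python

   import pandas as pd

   n_rows = 0
   for chunk in pd.read_sas('sas_xport.xpt', chunksize=10000):
       n_rows += len(chunk)   # each chunk is an ordinary DataFrame
   print(n_rows)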
@@ -297,16 +296,16 @@ See the :ref:`documentation <io.excel>` for more details.
 
 .. ipython:: python
 
-   df = pd.DataFrame([[1,2, 3, 4], [5,6, 7, 8]],
-                     columns = pd.MultiIndex.from_product([['foo', 'bar'],['a', 'b']],
-                                                          names = ['col1', 'col2']),
-                     index = pd.MultiIndex.from_product([['j'], ['l', 'k']],
-                                                        names = ['i1', 'i2']))
+   df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
+                     columns=pd.MultiIndex.from_product(
+                         [['foo', 'bar'], ['a', 'b']], names=['col1', 'col2']),
+                     index=pd.MultiIndex.from_product([['j'], ['l', 'k']],
+                                                      names=['i1', 'i2']))
 
    df
    df.to_excel('test.xlsx')
 
-   df = pd.read_excel('test.xlsx', header=[0,1], index_col=[0,1])
+   df = pd.read_excel('test.xlsx', header=[0, 1], index_col=[0, 1])
    df
 
 .. ipython:: python
@@ -412,15 +411,15 @@ Other enhancements
 
 .. ipython:: python
 
-   foo = pd.Series([1,2], name='foo')
-   bar = pd.Series([1,2])
-   baz = pd.Series([4,5])
+   foo = pd.Series([1, 2], name='foo')
+   bar = pd.Series([1, 2])
+   baz = pd.Series([4, 5])
 
 Previous Behavior:
 
 .. code-block:: ipython
 
-   In [1] pd.concat([foo, bar, baz], 1)
+   In [1]: pd.concat([foo, bar, baz], 1)
    Out[1]:
       0  1  2
    0  1  1  4
@@ -748,14 +747,14 @@ Previous Behavior:
 
 .. code-block:: ipython
 
-   In [5]: s==None
+   In [5]: s == None
    TypeError: Could not compare <type 'NoneType'> type with Series
 
 New Behavior:
 
 .. ipython:: python
 
-   s==None
+   s == None
 
 Usually you simply want to know which values are null.
 
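A minimal sketch of the idiomatic null check that sentence refers to (illustrative; assumes a Series ``s`` containing a missing value):

.. code-block:: python

   import numpy as np
   import pandas as pd

   s = pd.Series([1, 2, np.nan])

   s == None     # noqa: E711  -- now element-wise, and False everywhere
   s.isnull()    # True where the value is missing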
@@ -784,8 +783,8 @@ Previous Behavior:
 
 .. ipython:: python
 
-   df_with_missing = pd.DataFrame({'col1':[0, np.nan, 2],
-                                   'col2':[1, np.nan, np.nan]})
+   df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2],
+                                   'col2': [1, np.nan, np.nan]})
 
    df_with_missing
 
@@ -817,8 +816,8 @@ New Behavior:
 
    df_with_missing.to_hdf('file.h5',
                           'df_with_missing',
-                          format = 'table',
-                          mode = 'w')
+                          format='table',
+                          mode='w')
 
    pd.read_hdf('file.h5', 'df_with_missing')
 
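If the pre-0.17 behaviour of dropping all-NaN rows is wanted, ``dropna=True`` can be passed explicitly; a sketch (assumes PyTables is installed; not part of this commit):

.. code-block:: python

   import numpy as np
   import pandas as pd

   df_with_missing = pd.DataFrame({'col1': [0, np.nan, 2],
                                   'col2': [1, np.nan, np.nan]})

   # Explicitly drop rows that are entirely NaN before writing.
   df_with_missing.to_hdf('file.h5', 'df_with_missing',
                          format='table', mode='w', dropna=True)
   pd.read_hdf('file.h5', 'df_with_missing')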