1. Database connection and reading data
Connecting to the database, creating tables, and so on:
import pandas as pd
from sqlalchemy import create_engine
'''
DB_CONNECT_STRING = 'mysql+mysqldb://root:123@localhost/ooxx?charset=utf8'
engine = create_engine(DB_CONNECT_STRING, echo=True)
'''
engine = create_engine('sqlite:///../data/data.db')
engine.execute('DROP TABLE IF EXISTS tiezi_tb')  # clear out the old table (engine.execute is SQLAlchemy 1.x style)
# Convert the tb list into a pandas DataFrame, then insert it into the database directly
df = pd.DataFrame(tb, columns=tb_head)
df.to_sql('author_tb', engine, if_exists='append')
# You can read data directly with read_sql, or write a helper that builds the SQL string (a sketch of such a helper follows this block)
sql = '''
SELECT count(a.tieID) fa_num, avg(a.num_reply) mean_reply,
       b.user_name, b.cur_score, b.is_like, b.level_id, b.user_sex, b.bawu
FROM tiezi_tb a, author_tb b
WHERE a.user_id = b.user_id
GROUP BY a.user_id
'''
df = pd.read_sql_query(sql, engine)
print(df.describe())
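As mentioned in the comment above, the query can also be assembled by a small helper; a minimal sketch (make_select is a hypothetical name, not part of the original code):
def make_select(table, columns, where=None):
    # assemble a plain SELECT statement from a table name and column list
    sql = 'SELECT {} FROM {}'.format(', '.join(columns), table)
    if where:
        sql += ' WHERE {}'.format(where)
    return sql
df = pd.read_sql_query(make_select('author_tb', ['user_name', 'cur_score']), engine)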
Reading zip data
import requests
import io
import zipfile
def getZIP(zipFileName):
    r = requests.get(zipFileName).content
    s = io.BytesIO(r)             # wrap the downloaded bytes in an in-memory buffer
    zf = zipfile.ZipFile(s, 'r')  # read in a list of zipped files
    return zf
url = 'http://seanlahman.com/files/database/lahman-csv_2014-02-14.zip'
zf = getZIP(url)
print(zf.namelist())
salaries = pd.read_csv(zf.open('Salaries.csv'))
salaries.head()
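If several tables from the archive are needed, the same ZipFile handle can be reused; a quick sketch (the dictionary comprehension below is illustrative, not part of the original):
# load every CSV in the archive into a dict of DataFrames keyed by file name
tables = {name: pd.read_csv(zf.open(name))
          for name in zf.namelist() if name.endswith('.csv')}
print(tables['Salaries.csv'].shape)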
2. Selection and indexing
# For integer-position based indexing, use iloc
df.iloc[1,1]
df.iloc[0:2,:]
# Select columns by name directly
df[['day', 'year']]
# Filtering: the three lines below are equivalent; query is recommended since it operates on column names directly
flights[(flights.month == 1) & (flights.day == 1)]
flights[(flights['month'] == 1) & (flights['day'] == 1)]
flights.query("month == 1 & day == 1")
# np.where: build a conditional column from a test on another column
import numpy as np
df['logic'] = np.where(df['AAA'] > 5, 'high', 'low')
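For label-based selection the counterpart of iloc is loc; a brief sketch, assuming df keeps its default integer index and the 'day'/'year' columns used above:
df.loc[0:2, ['day', 'year']]        # rows by index label (inclusive), columns by name
df.loc[df['year'] > 2013, 'day']    # boolean mask combined with a column label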
3. Adding and modifying columns
See the Merge, join, and concatenate section of the pandas documentation.
# Add two new columns
'''
flights columns: year, month, day, dep_time, dep_delay, arr_time, arr_delay,
carrier, tailnum, flight, origin, dest, air_time, distance, hour, minute
'''
flights.assign(gain=flights.arr_delay - flights.dep_delay,
               speed=flights.distance / flights.air_time * 60)
# Or assign the new columns directly:
flights['gain'] = flights.arr_delay - flights.dep_delay
joined['Wins'] = np.log(joined['W'])
flights.rename(columns={'tailnum': 'tail_num'}) # rename a column (returns a new DataFrame)
# The examples above add columns directly. To combine two datasets, use merge; how can be 'left', 'right', 'outer', or 'inner' and defaults to 'inner'. merge works like a database join, which is the approach I personally recommend.
'''
merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=True,
suffixes=('_x', '_y'), copy=True)
'''
joined = pd.merge(data, countries, how="inner", on=['Country'])
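To see what the how argument does, here is a made-up miniature version of data and countries (illustrative values only, not the original data):
data = pd.DataFrame({'Country': ['CN', 'US', 'FR'], 'val': [1, 2, 3]})
countries = pd.DataFrame({'Country': ['CN', 'US'], 'name': ['China', 'United States']})
pd.merge(data, countries, how='inner', on=['Country'])  # only the matching rows CN and US survive
pd.merge(data, countries, how='left', on=['Country'])   # all rows of data are kept; FR gets NaN for name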
# Using concat
'''
concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False)
'''
frames = [df1, df2, df3] # columns stay the same; rows are stacked
result = pd.concat(frames)
result = pd.concat([df1, df4], axis=1, join='inner') # concatenate along columns; join='inner' keeps only index labels common to both
result = pd.concat([df1, df4], axis=1, join_axes=[df1.index]) # align on df1's index
# note: join_axes was removed in pandas 1.0; use pd.concat([df1, df4], axis=1).reindex(df1.index) instead
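A tiny made-up example of the two axes (df1 and df4 here are stand-ins, not the frames from the pandas docs):
df1 = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=[0, 1])
df4 = pd.DataFrame({'C': [5, 6]}, index=[1, 2])
pd.concat([df1, df4])                        # axis=0: four rows, columns A, B, C with NaN where missing
pd.concat([df1, df4], axis=1, join='inner')  # axis=1: only the shared index label 1 survives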
4. Aggregation
'''
DataFrame.groupby(by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False)
as_index: For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output.
sort: boolean, default True. Sort group keys. Get better performance by turning this off.
Passing as_index explicitly (as_index=False below) is recommended.
'''
df = df[["a","b","c"]].groupby(["a", "b"], as_index=False).sum()
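A small made-up frame to show what as_index changes (values are illustrative only):
df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'x'], 'c': [10, 20, 30]})
df.groupby(['a', 'b']).sum()                  # group labels become a MultiIndex
df.groupby(['a', 'b'], as_index=False).sum()  # group labels stay as ordinary columns (SQL-style)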