update
This commit is contained in:
commit
026831c215
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,74 @@
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import io
|
||||||
|
import demjson
|
||||||
|
import requests
|
||||||
|
from fake_useragent import UserAgent
|
||||||
|
|
||||||
|
|
||||||
|
# Base REST endpoints for the supported European statistical data providers.
url = {
    "eurostat": "http://ec.europa.eu/eurostat/wdds/rest/data/v2.1/json/en/",
    "ecb": "https://sdw-wsrest.ecb.europa.eu/service/data/",
}
|
||||||
|
|
||||||
|
class ecb_data(object):
    """Client for the ECB Statistical Data Warehouse (SDW) REST API."""

    def __init__(self, url=url["ecb"]):
        # Base service URL; defaults to the module-level ECB endpoint.
        self.url = url

    def codebook(self):
        """Return a pointer to the ECB's series-key codebook."""
        return "please follow the ECB's codebook: https://sdw.ecb.europa.eu/browse.do?node=9691101"

    def get_data(self,
                 datacode="ICP",
                 key="M.U2.N.000000.4.ANR",
                 startdate="2000-01-01",
                 enddate="2020-01-01"):
        """
        Download one ECB SDW series as a pandas DataFrame.

        Parameters
        ----------
        datacode : str
            Dataset identifier (default "ICP").
        key : str
            Series key within the dataset.
        startdate, enddate : str
            ISO dates bounding the observation period.

        Returns
        -------
        pandas.DataFrame
            Parsed from the service's CSV response.

        Raises
        ------
        requests.HTTPError
            If the service responds with an error status.
        """
        tmp_url = self.url + "{}/".format(datacode) + "{}".format(key)
        ua = UserAgent()
        request_header = {"User-Agent": ua.random, 'Accept': 'text/csv'}
        request_params = {
            "startPeriod": "{}".format(startdate),
            "endPeriod": "{}".format(enddate)
        }
        r = requests.get(tmp_url, params=request_params, headers=request_header)
        # Fail loudly on HTTP errors instead of feeding an error page to read_csv.
        r.raise_for_status()
        df = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
        return df
|
||||||
|
|
||||||
|
class eurostat_data(object):
    """Client for the Eurostat JSON web-services API."""

    def __init__(self, url=url["eurostat"]):
        # Base service URL; defaults to the module-level Eurostat endpoint.
        self.url = url

    def codebook(self):
        """Return a pointer to Eurostat's code lists."""
        return "please follow the EuroStat's codebook: \nhttps://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing?sort=1&dir=dic"

    def get_data(self,
                 datasetcode="nama_10_gdp",
                 precision="1",
                 unit="CP_MEUR",
                 na_item="B1GQ",
                 time="2020"):
        """
        Download one Eurostat indicator, one value per geo area, as a DataFrame.

        Parameters
        ----------
        datasetcode : str
            Eurostat dataset code (default "nama_10_gdp").
        precision, unit, na_item, time : str
            Query dimensions passed through to the API.

        Returns
        -------
        pandas.DataFrame
            Columns: "Geo" (area label) and one column named after ``na_item``.

        Raises
        ------
        requests.HTTPError
            If the service responds with an error status.
        """
        tmp_url = self.url + "{}".format(datasetcode)
        ua = UserAgent()
        request_header = {"User-Agent": ua.random, 'Accept': 'text/csv'}
        request_params = {
            "precision": "{}".format(precision),
            "unit": "{}".format(unit),
            "na_item": "{}".format(na_item),
            "time": "{}".format(time)
        }
        r = requests.get(tmp_url, params=request_params, headers=request_header)
        # Fail loudly on HTTP errors instead of parsing an error page.
        r.raise_for_status()
        # The API returns strict JSON; requests' built-in parser replaces the
        # unmaintained demjson dependency (broken on Python >= 3.10).
        data_json = r.json()
        value = data_json['value']
        # Invert geo-code -> position into position -> geo-code so that each
        # observation index in `value` can be mapped back to its area code.
        abb = data_json['dimension']['geo']['category']['index']
        abb = {abb[k]: k for k in abb}
        geo = data_json['dimension']['geo']['category']['label']
        geo_list = [abb[int(k)] for k in list(value.keys())]
        geo = [geo[k] for k in geo_list]
        df = pd.DataFrame({"Geo": geo, "{}".format(na_item): list(value.values())})
        return df
|
|
@ -3,10 +3,14 @@ import numpy as np
|
||||||
import requests
|
import requests
|
||||||
from fake_useragent import UserAgent
|
from fake_useragent import UserAgent
|
||||||
import io
|
import io
|
||||||
|
import os
|
||||||
|
import demjson
|
||||||
|
|
||||||
# Main Economic Indicators: https://alfred.stlouisfed.org/release?rid=205
# Base endpoints for the U.S. Federal Reserve data sources used below.
url = {
    "fred_econ": "https://fred.stlouisfed.org/graph/fredgraph.csv?",
    "philfed": "https://www.philadelphiafed.org/surveys-and-data/real-time-data-research/",
    "chicagofed": "https://www.chicagofed.org/~/media/publications/",
}
|
||||||
|
|
||||||
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
|
def gdp_quarterly(startdate="1947-01-01", enddate="2021-01-01"):
|
||||||
|
@ -682,3 +686,94 @@ def bir(startdate="2003-01-01", enddate="2021-01-01"):
|
||||||
df = pd.merge_asof(df_5y, df_10y, on = "DATE", direction = "backward")
|
df = pd.merge_asof(df_5y, df_10y, on = "DATE", direction = "backward")
|
||||||
df.columns = ["Date", "BIR_5y", "BIR_10y"]
|
df.columns = ["Date", "BIR_5y", "BIR_10y"]
|
||||||
return df
|
return df
|
||||||
|
|
||||||
|
def adsbci():
    """
    An index designed to track real business conditions at high observation
    frequency (Philadelphia Fed ADS index).

    Returns
    -------
    pandas.DataFrame
        Columns "Date" (datetime) and "ADS_Index".

    Raises
    ------
    requests.HTTPError
        If the download fails.
    """
    ua = UserAgent()
    request_header = {"User-Agent": ua.random}
    tmp_url = url["philfed"] + "ads"
    r = requests.get(tmp_url, headers=request_header)
    r.raise_for_status()
    # Parse the workbook in memory: the previous implementation wrote an
    # "ads_temp.xls" scratch file without a context manager and leaked it
    # whenever read_excel raised.
    df = pd.read_excel(io.BytesIO(r.content))
    df.columns = ["Date", "ADS_Index"]
    df['Date'] = pd.to_datetime(df["Date"], format="%Y:%m:%d")
    return df
|
||||||
|
|
||||||
|
def pci():
    """
    Tracks the degree of political disagreement among U.S. politicians at the
    federal level, Monthly (Philadelphia Fed Partisan Conflict Index).
    """
    source = "https://www.philadelphiafed.org/-/media/frbp/assets/data-visualizations/partisan-conflict.xlsx"
    df = pd.read_excel(source)
    # Fuse the separate Year and Month columns into a single datetime column.
    df["Date"] = pd.to_datetime(df["Year"].astype(str) + df["Month"], format="%Y%B")
    df = df.drop(["Year", "Month"], axis=1)
    return df[["Date", "Partisan Conflict"]]
|
||||||
|
|
||||||
|
def inflation_noewcasting():
    """
    Download the Cleveland Fed's quarterly inflation nowcasts (CPI, core CPI,
    PCE, core PCE, plus annualized variants) from the chart JSON feed and
    return them as one long DataFrame.

    NOTE(review): "noewcasting" looks like a typo for "nowcasting"; the name
    is kept as-is so existing callers do not break.
    """
    ua = UserAgent()
    request_header = {"User-Agent": ua.random}
    tmp_url = "https://www.clevelandfed.org/~/media/files/charting/%20nowcast_quarter.json"

    r = requests.get(tmp_url, headers = request_header)
    # One chart object per quarter; demjson tolerates non-strict JSON.
    tmp_df = pd.DataFrame(demjson.decode(r.text))
    df = pd.DataFrame()
    for i in range(0, len(tmp_df)):
        # Build "YYYY/MM/DD" dates: the year comes from the chart subcaption,
        # month/day are regex-extracted from each data point's tooltip text.
        date = tmp_df['chart'][i]['subcaption'][:4] + "/" + \
            pd.DataFrame(tmp_df["dataset"][i][0]['data'])['tooltext'].str.extract(r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])\b")[0] + "/" + \
            pd.DataFrame(tmp_df["dataset"][i][0]['data'])['tooltext'].str.extract(r"\b(0?[1-9]|1[0-2])/(0?[1-9]|[12][0-9]|3[01])\b")[1]
        # Series 0..7 are read positionally from the feed — presumably CPI,
        # core CPI, PCE, core PCE, then their annualized counterparts, in
        # that order; TODO(review) confirm against the live JSON layout.
        CPI_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[0])["value"]
        C_CPI_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[1])["value"]
        PCE_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[2])["value"]
        C_PCE_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[3])["value"]
        A_CPI_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[4])["value"]
        A_C_CPI_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[5])["value"]
        A_PCE_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[6])["value"]
        A_C_PCE_I = pd.DataFrame((pd.DataFrame(tmp_df["dataset"][i])['data'])[7])["value"]
        tmp_df2 = pd.DataFrame({"date": date,
                                "CPI_I": CPI_I,
                                "C_CPI_I": C_CPI_I,
                                "PCE_I": PCE_I,
                                "C_PCE_I": C_PCE_I,
                                "A_CPI_I": A_CPI_I,
                                "A_C_CPI_I": A_C_CPI_I,
                                "A_PCE_I": A_PCE_I,
                                "A_C_PCE_I": A_C_PCE_I})
        # Append this quarter's block to the running result.
        df = pd.concat([df,tmp_df2], axis=0)
    df.reset_index(drop=True, inplace=True)

    # Empty strings in the feed mark missing observations.
    df.replace('', np.nan, inplace = True)
    return df
|
||||||
|
|
||||||
|
def bbki():
    """Download the Chicago Fed BBKI monthly data series as a DataFrame."""
    return pd.read_csv(url["chicagofed"] + "bbki/bbki-monthly-data-series-csv.csv")
|
||||||
|
|
||||||
|
def cfnai():
    """Download the Chicago Fed CFNAI data series as a DataFrame."""
    return pd.read_csv(url["chicagofed"] + "cfnai/cfnai-data-series-csv.csv")
|
||||||
|
|
||||||
|
def cfsbc():
    """Download the Chicago Fed Survey of Business Conditions activity index."""
    return pd.read_csv(url["chicagofed"] + "cfsbc-activity-index-csv.csv")
|
||||||
|
|
||||||
|
def nfci():
    """Download the Chicago Fed NFCI decomposition CSV as a DataFrame."""
    return pd.read_csv(url["chicagofed"] + "nfci/decomposition-nfci-csv.csv")
|
||||||
|
|
||||||
|
def anfci():
    """
    Download the Chicago Fed *adjusted* NFCI (ANFCI) decomposition CSV.

    Bug fix: this function was previously also named ``nfci``, silently
    shadowing the NFCI downloader defined just above even though it fetches
    the ANFCI file. Renamed to match the data it returns.
    """
    tmp_url = url["chicagofed"] + "nfci/decomposition-anfci-csv.csv"
    df = pd.read_csv(tmp_url)
    return df
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# time: 05/29/2021 UTC+8
|
||||||
|
# author: terencelau
|
||||||
|
# email: t_lau@uicstat.com
|
|
@ -47,9 +47,18 @@ def market_data(
|
||||||
|
|
||||||
# Resolved an unresolved merge conflict (<<<<<<< HEAD ... >>>>>>> master):
# both sides contained the identical call, so a single copy is kept.
if __name__ == "__main__":
    # Smoke test: fetch one year of daily EUR/USD bid data.
    data = market_data(instrument="eurusd",
                       startdate="2020-01-01",
                       enddate="2021-01-01",
                       timeframe="d1",
                       pricetype="bid",
                       volume=True,
                       flat=True)
|
||||||
|
|
|
@ -1,29 +1 @@
|
||||||
from CEDA.MacroEcon.cn import (
|
from CEDA import *
|
||||||
gdp_quarterly,
|
|
||||||
ppi_monthly,
|
|
||||||
cpi_monthly,
|
|
||||||
pmi_monthly,
|
|
||||||
fai_monthly,
|
|
||||||
hi_old_monthly,
|
|
||||||
hi_new_monthly,
|
|
||||||
ci_eei_monthly,
|
|
||||||
ig_monthly,
|
|
||||||
cgpi_monthly,
|
|
||||||
cci_csi_cei_monthly,
|
|
||||||
trscg_monthly,
|
|
||||||
ms_monthly,
|
|
||||||
ie_monthly,
|
|
||||||
stock_monthly,
|
|
||||||
fgr_monthly,
|
|
||||||
ctsf_monthly,
|
|
||||||
sao_monthly,
|
|
||||||
fdi_monthly,
|
|
||||||
gr_monthly,
|
|
||||||
ti_monthly,
|
|
||||||
nl_monthly,
|
|
||||||
dfclc_monthly,
|
|
||||||
fl_monthly,
|
|
||||||
drr_monthly,
|
|
||||||
interest_monthly,
|
|
||||||
gdc_daily
|
|
||||||
)
|
|
||||||
|
|
|
@ -0,0 +1,15 @@
|
||||||
|
import requests
|
||||||
|
|
||||||
|
def config(http: str, https: str, auth: bool, user: str, passwd: str):
    """
    Build a ``requests``-style ``proxies`` mapping.

    Parameters
    ----------
    http, https : str
        Proxy endpoint (e.g. "host:port") for each scheme.
    auth : bool
        When true, embed ``user:passwd@`` credentials in the proxy URLs.
    user, passwd : str
        Proxy credentials; ignored when ``auth`` is false.

    Returns
    -------
    dict
        Mapping with "http" and "https" keys, suitable for ``proxies=``.
    """
    # Truthiness replaces the previous `auth == True` / `auth == False`
    # comparisons, which silently returned None for any other value.
    if auth:
        # Embed basic-auth credentials directly in each proxy URL.
        return {
            "http": "http://{}:{}@{}".format(user, passwd, http),
            "https": "https://{}:{}@{}".format(user, passwd, https),
        }
    # No authentication: pass the proxy endpoints through unchanged.
    return {
        "http": http,
        "https": https,
    }
|
8
setup.py
8
setup.py
|
@ -2,7 +2,15 @@ from setuptools import setup, find_packages
|
||||||
import os
|
import os
|
||||||
setup(
|
setup(
|
||||||
name = "CEDApy",
|
name = "CEDApy",
|
||||||
|
<<<<<<< HEAD
|
||||||
version = "1.0.6",
|
version = "1.0.6",
|
||||||
|
=======
|
||||||
|
<<<<<<< HEAD
|
||||||
|
version = "1.0.3",
|
||||||
|
=======
|
||||||
|
version = "1.0.3",
|
||||||
|
>>>>>>> master
|
||||||
|
>>>>>>> 50f1f6ba74d680860a737fec2f272395305d9c5c
|
||||||
keywords = "quantitative economic data",
|
keywords = "quantitative economic data",
|
||||||
long_description = open(
|
long_description = open(
|
||||||
os.path.join(
|
os.path.join(
|
||||||
|
|
Loading…
Reference in New Issue