This is an attempt to document how I tried, and failed, to run a pipeline-based algorithm as a zipline backtest.
We use the algorithm from https://www.quantopian.com/posts/introducing-the-pipeline-api to demonstrate how to run a backtest in this environment.
A summary of the issues we saw:
NoEngineRegistered: Attempted to run a pipeline but no pipeline resources were registered.
Looking at https://github.com/quantopian/zipline/blob/master/zipline/pipeline/engine.py#L31 it seems we need to provide get_pipeline_loader
for the algorithm:
algo_obj = TradingAlgorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
get_pipeline_loader=???
)
Any way we can provide the pipeline_loader?
# example data input: any way to improve this?
# Timezone-aware (UTC) backtest window — zipline requires tz-aware datetimes
# for the TradingAlgorithm start/end parameters used further down.
from datetime import datetime
import pytz
start_date = datetime(2015, 10, 25, 9, 0, 0, 0, pytz.utc)
end_date = datetime(2016, 11, 23, 0, 0, 0, 0, pytz.utc)
# Plain date strings were tried first but rejected by the API:
#start_date = '2015-10-25'
#end_date = '2016-11-23'
Change the `quantopian.algorithm` imports into `zipline.api`.
The original algorithm was from https://www.quantopian.com/posts/introducing-the-pipeline-api - we changed it so it would
run outside Quantopian, and we import the other necessary API functions.
"""
This example comes from a request in the forums.
The post can be found here: https://www.quantopian.com/posts/ranking-system-based-on-trading-volume-slash-shares-outstanding
The request was:
I am stuck trying to build a stock ranking system with two signals:
1. Trading Volume/Shares Outstanding.
2. Price of current day / Price of 60 days ago.
Then rank Russell 2000 stocks every month, long the top 5%, short the bottom 5%.
"""
# STEP1: use from zipline.api instead
#from quantopian.algorithm import attach_pipeline, pipeline_output
import copy
import datetime
import math
from collections import defaultdict
from copy import deepcopy

import numpy as np
import pandas as pd
import pytz
import talib
from numpy import nanmin, nanmax, nanmean
from pandas.tseries.offsets import BDay

from zipline.api import schedule_function, date_rules, time_rules, sid, symbol, symbols, \
    get_datetime, order_target_percent, record, attach_pipeline, set_commission, \
    order_target, pipeline_output, get_open_orders, cancel_order
#from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline, CustomFilter
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.data import morningstar as mstar
from quantopian.pipeline.factors import SimpleMovingAverage, AverageDollarVolume, CustomFactor
from quantopian.pipeline.filters.morningstar import IsPrimaryShare
from quantopian.pipeline.classifiers.morningstar import Sector
# Dead code kept for reference: the parameterized SidInList variant used when
# hand-picking specific SIDs instead of screening through the pipeline.
'''
class SidInList(CustomFilter): #USE IF WANT TO USE SPECIFIC STOCKS
    """
    Filter returns True for any SID included in parameter tuple passed at creation.
    Usage: my_filter = SidInList(sid_list=(23911, 46631))
    """
    inputs = []
    window_length = 1
    params = ('sid_list',)
    def compute(self, today, assets, out, sid_list):
        out[:] = np.in1d(assets, sid_list)
'''
class SidInList(CustomFilter): #USE FOR PIPELINE STOCKS
    """
    Filter returns True for any SID included in parameter tuple passed at creation.

    Usage: my_filter = SidInList(sid_list=(23911, 46631))
    """
    inputs = []
    window_length = 1
    # Fix: `params` was commented out and `compute` dropped the `sid_list`
    # argument, so `np.in1d(assets)` raised TypeError (missing second array)
    # whenever the filter was computed.  Restored to match the documented
    # usage above and the reference version kept in the string literal above.
    params = ('sid_list',)

    def compute(self, today, assets, out, sid_list):
        # Element-wise membership test: True where the asset's sid is in sid_list.
        out[:] = np.in1d(assets, sid_list)
class WVIXHIST(CustomFactor): #EXTRA FOR RUNNING HISTORY CALCULATED MANUALLY FOR EACH ADDITIONAL DAY
    """
    Williams-VIX-Fix-style volatility factor over a 24-day window.

    For each asset: take the highest close over window rows -23..-2 (i.e.
    excluding the most recent day), then report how far the low on row 22
    (the second-to-last day) sits below that high, as a percentage.
    """
    inputs = [USEquityPricing.low,USEquityPricing.close]
    window_length = 24
    def compute(self, today, assets, out, lows, close):
        # Highest close across the window, excluding the most recent row.
        highest_close = nanmax(close[-23:-1], axis=0)
        # Percent drawdown of the prior day's low from that highest close.
        WVIXHIST = ((highest_close - lows[22]) / (highest_close)) * 100
        out[:] = WVIXHIST
def initialize(context):
    """
    Set up trade schedules, per-run state frames, and the stock-selection
    pipeline, then attach the pipeline under the name 'example'.

    Fix: the exchange filter `xch` originally combined the NAS/NYS
    comparisons with the Python `or` keyword, which does not perform an
    element-wise union of pipeline Filters (it short-circuits on object
    truthiness).  Pipeline Filters must be combined with the `|` operator.
    """
    #set_commission(commission.PerShare(cost=0, min_trade_cost=0))
    #set_slippage(slippage.FixedSlippage(spread=0))
    #set_slippage(slippage.VolumeShareSlippage(volume_limit=100, price_impact=0))
    # Daily schedules: flatten near the close, order shortly after the open,
    # rebuild WVIX history at the open, and clear cached frames weekly.
    schedule_function(func=close_positions,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_close(minutes=15))
    schedule_function(order_positions, date_rule = date_rules.every_day(), time_rule=time_rules.market_open(minutes = 5))
    schedule_function(attempt2, date_rule = date_rules.every_day(), time_rule=time_rules.market_open())
    schedule_function(cleardf, date_rule = date_rules.week_end(), time_rule=time_rules.market_close(minutes = 3))
    total_minutes = 6*60 + 30
    for i in range(total_minutes):
        # Every xx minutes run schedule: the stop-loss sweep fires on
        # minutes 1, 31, 61, ... through the 390-minute session.
        if i % 30 == 1:
            schedule_function(
                stop_loss,
                date_rule = date_rules.every_day(),
                time_rule=time_rules.market_open(minutes=i),)
    # Per-run state: synthetic-VIX history plus daily long/short quotient frames.
    context.main_df2 = pd.DataFrame()
    context.main_df2a = pd.DataFrame()
    context.sma_quotientl = pd.DataFrame()
    context.sma_quotients = pd.DataFrame()
    #context.sma_quotient = pd.DataFrame({'Date': [],'Asset': [], 'sma_quotient': []})
    #context.sma_quotient = pd.DataFrame()
    context.max_lvrg = 0
    # Tradability screen: US-listed primary-share common stock with market-cap
    # data, cap above $30M, a sector, no LP/OTC/when-issued/depositary flags,
    # and a 5-day SMA price above $10.
    country_ref = mstar.company_reference.country_id.latest.eq('USA')
    have_data = mstar.valuation.market_cap.latest.notnull()
    mkt_cap_thresh = (mstar.valuation.market_cap.latest > 30e6)
    sector_check = Sector().notnull()
    xch = (mstar.company_reference.primary_exchange_id.latest.eq('NAS') |
           mstar.company_reference.primary_exchange_id.latest.eq('NYS'))
    common_stock = mstar.share_class_reference.security_type.latest.eq('ST00000001')
    not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\. ]?P\.?$')
    not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()
    not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')
    not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')
    not_depository = ~mstar.share_class_reference.is_depositary_receipt.latest
    primary_share = IsPrimaryShare()
    sma_1 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=5)
    min_price = (sma_1 > 10)
    # initialize filter/mask ...just leave in in case expand testing set
    tradable_filter = (country_ref & mkt_cap_thresh & sector_check & xch &
                       common_stock & not_lp_name & not_lp_balance_sheet &
                       have_data & not_otc & not_wi & not_depository & primary_share & min_price)
    # lets screen down to the stocks that have averaged
    # at least $100M of daily trading dollar volume over the
    # past XX days.
    # Create a dollar volume factor.
    dollar_volume = AverageDollarVolume(window_length=5)
    high_dollar_volume = dollar_volume.percentile_between(50, 100)
    #include_filter = SidInList(sid_list = (8554, 19920, 37514, 42667, 42223, 42224, 33217, 37083, 40552))
    #context.include_filter = SidInList(sid_list = ( sid(8554), sid(37514) )) #USE IF WANT TO USE SPECIFIC STOCKS
    #set_benchmark(symbol('SPY'))
    ''' LIST TO PLAY WITH
    sid(37514): -.15, #SPXL BULL 3x
    sid(37083): -.15, #SPXS Bear 3x
    sid(37049): -.185, #financial
    sid(37048): -.185, #financial
    sid(49403): -.15, #pharma
    sid(49402): -.15, #pharma
    sid(39898): -.15, #natural gas
    sid(49639): -.15, # natural gas Dec 2015
    sid(37513): -.15, #energy
    sid(37044): -.15, #energy
    sid(40553): -.1, #gold
    sid(40554): -.1, #gold
    sid(38294): -.1, #fixed income 20+
    sid(38292): -.1 #fixed income 20+
    sid(38293) - TYD - Daily 7-10 Year Treasury Bull 3x Shares
    sid(38295) - TYO - Daily 7-10 Year Treasury Bear 3x Shares
    '''
    sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
    high_cap_high_dv = high_dollar_volume & tradable_filter #USE FOR PIPELINE STOCKS
    #high_cap_high_dv = context.include_filter #USE IF WANT TO USE SPECIFIC STOCKS
    pipe_screen = (high_cap_high_dv)
    pipe = Pipeline(screen = pipe_screen)
    close = WVIXHIST(mask=high_cap_high_dv)
    # Add the desired values to our pipe.
    pipe.add(close, 'close')
    pipe.add(sma, 'sma')
    #pipe.add(dollar_volume, 'dollar_volume')
    attach_pipeline(pipe, 'example')
def handle_data(context, data):
    """Per-bar hook — intentionally a no-op.

    All trading happens through the scheduled functions set up in
    initialize().  The disabled snippet below recorded leverage stats.
    """
    # if context.account.leverage > context.max_lvrg:
    #     context.max_lvrg = context.account.leverage
    # port = len(context.portfolio.positions)
    # record(leverage=context.account.leverage, count=port, maxlev=context.max_lvrg)
    pass
# Dead code kept for reference: an earlier inline stop-loss sweep that ran in
# handle_data; superseded by the scheduled stop_loss() function below.
'''
# Stop Loss
if len(context.portfolio.positions) >0:
    for stock in context.portfolio.positions:
        cost = context.portfolio.positions[stock].cost_basis
        position = context.portfolio.positions[stock]
        if position.amount > 0. and cost > 0.:
            if data.current(stock, 'price') < (cost * 0.95):
                if not get_open_orders(stock):
                    loss = np.around(((data.current(stock, 'price') - cost) / cost * 100.00), decimals=3)
                    log.info("LONG STOP LOSS {stock}->LOSS:{loss} %".format(stock=stock, loss=loss))
                    order_target_percent(stock, 0)
            if data.current(stock, 'price') > (cost * 1.075):
                if not get_open_orders(stock):
                    profit = np.around(((data.current(stock, 'price') - cost) / cost * 100.00), decimals=3)
                    log.info("LONG PROFIT {stock}->PROFIT:{profit} %".format(stock=stock, profit=profit))
                    order_target_percent(stock, 0)
        if position.amount < 0. and cost > 0.:
            if data.current(stock, 'price') > (cost * 1.05):
                if not get_open_orders(stock):
                    loss = -np.around(((data.current(stock, 'price') - cost) / cost * 100.00), decimals=3)
                    log.info("SHORT STOP LOSS {stock}->LOSS:{loss} %".format(stock=stock, loss=loss))
                    order_target_percent(stock, 0)
            if data.current(stock, 'price') < (cost * 0.925):
                if not get_open_orders(stock):
                    profit = -np.around(((data.current(stock, 'price') - cost) / cost * 100.00), decimals=3)
                    log.info("SHORT PROFIT {stock}->PROFIT:{profit} %".format(stock=stock, profit=profit))
                    order_target_percent(stock, 0)
for stock, position in context.portfolio.positions.iteritems(): #shorthand
    amount = position.amount
    last_sale_price = position.last_sale_price
'''
def before_trading_start(context, data):
    """Cache the day's pipeline results on the context before the open."""
    daily_frame = pipeline_output('example')
    context.outputa = daily_frame
    # Convenience alias: just the WVIX 'close' factor column.
    context.output = daily_frame.close
def stop_loss(context, data):
    """
    Intraday risk sweep, scheduled roughly every 30 minutes.

    Longs:  exit below 95% of cost basis (stop) or above 107.5% (profit).
    Shorts: exit above 105% of cost basis (stop) or below 92.5% (profit).
    An order is only sent when no open order already exists for the stock.
    """
    holdings = context.portfolio.positions
    if len(holdings) == 0:
        return
    for stock in holdings:
        entry = holdings[stock].cost_basis
        shares = holdings[stock].amount
        price = data.current(stock, 'price')
        if shares > 0. and entry > 0.:
            # Long position: stop-loss band first, then profit-take band.
            if price < (entry * 0.95) and not get_open_orders(stock):
                loss = np.around(((price - entry) / entry * 100.00), decimals=3)
                log.info("LONG STOP LOSS {stock}->LOSS:{loss} %".format(stock=stock, loss=loss))
                order_target_percent(stock, 0)
            if price > (entry * 1.075) and not get_open_orders(stock):
                profit = np.around(((price - entry) / entry * 100.00), decimals=3)
                log.info("LONG PROFIT {stock}->PROFIT:{profit} %".format(stock=stock, profit=profit))
                order_target_percent(stock, 0)
        if shares < 0. and entry > 0.:
            # Short position: signs flipped so the logged move reads naturally.
            if price > (entry * 1.05) and not get_open_orders(stock):
                loss = -np.around(((price - entry) / entry * 100.00), decimals=3)
                log.info("SHORT STOP LOSS {stock}->LOSS:{loss} %".format(stock=stock, loss=loss))
                order_target_percent(stock, 0)
            if price < (entry * 0.925) and not get_open_orders(stock):
                profit = -np.around(((price - entry) / entry * 100.00), decimals=3)
                log.info("SHORT PROFIT {stock}->PROFIT:{profit} %".format(stock=stock, profit=profit))
                order_target_percent(stock, 0)
def order_positions(context, data):
    """
    Main ordering conditions to always order an equal percentage in each position
    so it does a rolling rebalance by looking at the stocks to order today and the stocks
    we currently hold in our portfolio.

    NOTE(review): written against Python 2 / old pandas — relies on the
    removed `<>` operator, `pd.ewma`, and `DataFrame.sort`; verify library
    versions before running.
    """
    # Fall back to the bootstrap history built by attempt2 on the first pass.
    if context.main_df2.empty:
        context.main_df2 = context.main_df2a.T
    if len(context.output.index) != 0:
        #log.info("WVIX = " + "\n" + str(context.main_df2.T[-5:]))
        # Rebuild the long/short quotient frames from scratch each day.
        context.sma_quotientl = pd.DataFrame()
        context.sma_quotients = pd.DataFrame()
        for stock in context.output.index:
            #if context.main_df2[stock][-1:].values == context.main_df2[stock].index[-1:].asobject:
            #context.main_df2 = context.main_df2.ix[context.main_df2[stock == stock]]
            #order_positions(context, data)
            #WVIX_MAX = nanmax(context.main_df2[stock][-22:-1], axis=0)
            # Latest synthetic-VIX reading for this stock...
            vix = context.main_df2[context.main_df2.index == stock].T[-1:] #context.main_df2[stock][-1]
            #WVIX_MAX = nanmax(context.main_df2[context.main_df2.index == stock].T[-22:-1], axis=0)
            #vix_avg = nanmean(context.main_df2[context.main_df2.index == stock].T[-10:-1], axis=0)
            # ...and its exponentially weighted average over the last 5 rows.
            vix_avg = pd.ewma(context.main_df2[context.main_df2.index == stock].T[-5:], span = 4, min_periods=3, ignore_na=False)
            #if ((vix_avg[-1] / vix) > 1).item()[-1]: #vix_avg[-1] > vix:
            #sma_quotient = vix_avg[-1] / vix
            # Quotients >= 1 signal: EW-average above latest reading (long side)
            # or latest reading above EW-average (short side).
            sma_quotientl = vix_avg[-1:] / vix
            sma_quotients = vix / vix_avg[-1:]
            #log.info("sma_quotient = " + "\n" + str(sma_quotient[-5:]))
            #if len(context.sma_quotient.T) < len(context.output.index): ## DOES NOT WORK AFTER FIRST STOCK PASSES
            context.sma_quotientl = pd.concat([context.sma_quotientl, sma_quotientl], axis=1)
            context.sma_quotients = pd.concat([context.sma_quotients, sma_quotients], axis=1)
            #context.sma_quotient = pd.concat([context.sma_quotient.T, sma_quotient.T], axis=1)
            #!!!context.sma_quotient = context.sma_quotient.append(sma_quotient.T, ignore_index=False)
        todays_list_alll = context.sma_quotientl.T
        todays_list_alls = context.sma_quotients.T
        # Percentile bands over quotients >= 1; only the lower cut is applied below.
        cutminl = todays_list_alll[todays_list_alll>=1.0].dropna().quantile(0.5)
        cutmaxl = todays_list_alll[todays_list_alll>=1.0].dropna().quantile(0.985)
        cutmins = todays_list_alls[todays_list_alls>=1.0].dropna().quantile(0.5)
        cutmaxs = todays_list_alls[todays_list_alls>=1.0].dropna().quantile(0.985)
        #todays_list_all["longs"] = todays_list_all[todays_list_all>=cutmin][todays_list_all<=cutmax].dropna()#(todays_list_all >=cutoff).dropna()
        #todays_list_all["longs"] = todays_list_all[todays_list_all>=cutmin].dropna()
        #todays_list_alll["longs"] = todays_list_alll[todays_list_alll>=cutminl][todays_list_alll<=cutmaxl].dropna()
        # Mark long candidates; missing entries become the string "False".
        todays_list_alll["longs"] = todays_list_alll[todays_list_alll>=cutminl].dropna(how='any')
        todays_list_alll['longs']= todays_list_alll['longs'].replace(np.nan, "False")
        for stock in todays_list_alll[todays_list_alll['longs']<>"False"].index:
            #if data.current(stock, 'price') < talib.SMA(np.array(data.history(stock, 'close', 91, '1d')), timeperiod=90)[-2]:
            # Drop long candidates trading below their pipeline 10-day SMA.
            if data.current(stock, 'price') < context.outputa.sma[context.outputa.index == stock][-1]: #USE THIS
                #todays_list_alll.drop(todays_list_alll.index[[stock]])
                todays_list_alll = todays_list_alll[todays_list_alll.index <> stock] #USE THIS
        #todays_list_alls["shorts"] = todays_list_alls[todays_list_alls>=cutmins][todays_list_alls<=cutmaxs].dropna()
        todays_list_alls["shorts"] = todays_list_alls[todays_list_alls>=cutmins].dropna()
        todays_list_alls['shorts']= todays_list_alls['shorts'].replace(np.nan, "False")
        # Exit anything we hold that is on neither today's long nor short list.
        for stock in context.portfolio.positions:
            if data.can_trade(stock) and stock not in todays_list_alll[todays_list_alll['longs']<>"False"].index and stock not in todays_list_alls[todays_list_alls['shorts']<>"False"].index:
                order_target_percent(stock, 0)
        # Enter up to 50 longs, equal-weighted and capped at 10% each;
        # skip names that also appear on the short list.
        for stock in todays_list_alll[todays_list_alll['longs']<>"False"].sort("longs", ascending=False).head(50).index:
            if data.can_trade(stock) and stock not in todays_list_alls[todays_list_alls['shorts']<>"False"].index:
                order_target_percent(stock, min(0.1, 1.0 / len(todays_list_alll[todays_list_alll['longs']<>"False"].index)))
        if len(todays_list_alll[todays_list_alll['longs']<>"False"].index) > 0:
            #print "LONG LIST"
            log.info(str(len(todays_list_alll[todays_list_alll['longs']<>"False"])) + " in LONG LIST")
            #log.info("\n" + str(todays_list_all[todays_list_all['longs']<>"False"]))
        # NOTE(review): the short list is only logged — no short orders are
        # ever placed; confirm whether that is intentional.
        if len(todays_list_alls[todays_list_alls['shorts']<>"False"].index) > 0:
            #print "LONG LIST"
            log.info(str(len(todays_list_alls[todays_list_alls['shorts']<>"False"])) + " in SHORT LIST")
def close_positions(context, data):
    """
    End-of-day flatten, scheduled 15 minutes before the close: cancel every
    outstanding open order, then target 0% in every held position.

    Fix: `cancel_order` was called here but never imported from zipline.api,
    which would raise NameError at runtime; it is added to the import block
    at the top of the file.
    """
    # Cancel all pending orders first so the flattening orders don't collide.
    for sec, orders in get_open_orders().iteritems():
        for oo in orders:
            log.info("X CANCELED {0:s} with {1:,d} / {2:,d} filled"
                     .format(sec.symbol, oo.filled, oo.amount))
            cancel_order(oo)
    # Then close out every tradable position.
    for stock in context.portfolio.positions:
        if data.can_trade(stock):
            order_target_percent(stock, 0)
def cleardf(context, data):
    """Weekly housekeeping: drop the accumulated history frame to free memory."""
    # Replace rather than clear in place — attempt2 rebuilds it from scratch.
    context.main_df2 = pd.DataFrame()
def attempt2(context, data):
    """
    Build / extend the per-stock synthetic-VIX history in context.main_df2.

    Bootstrap phase (fewer than 22 rows of history): recompute the whole
    window into context.main_df2a.  Steady state: append today's pipeline
    output and reconcile the frame against the current pipeline universe.

    NOTE(review): several operations below look like no-ops or likely bugs —
    see the inline notes; confirm intended behavior before relying on them.
    """
    #assets_to_test = [ sid(8554), sid(37514) ] #USE IF WANT TO USE SPECIFIC STOCKS
    assets_to_test = ( context.output.index ) #USE FOR PIPELINE STOCKS
    #Day 1 of kickoff
    if len(context.main_df2.T) < 22:
        context.main_df2a = pd.DataFrame()
        # SPY (sid 8554) history is used only to drive ~24 loop iterations.
        low = data.history(sid(8554), 'low', 24, '1d')
        for i in low.index:
            # Shrink the look-back as rows accumulate in main_df2a.
            x = abs(22 - len(context.main_df2a))
            low = data.history(assets_to_test, 'low', 23 + x, '1d')
            close = data.history(assets_to_test, 'close', 23 + x + 1, '1d')
            highest_close = nanmax(close[:22], axis=0)
            # Same formula as the WVIXHIST pipeline factor, computed by hand.
            SynVIX = ((highest_close - low[20:22].shift(1)[-1:]) / (highest_close)) * 100
            context.main_df2a = context.main_df2a.append(SynVIX[assets_to_test], ignore_index=False).drop_duplicates()
    else:
        #dont use memory recalculating stocks in output that already in df2
        context.main_df2 = pd.concat([context.main_df2, context.output], axis=1)
        #remove from df thats not in output else x days later if in output will not have complete history
        # NOTE(review): Index subtraction as set-difference is old-pandas behavior.
        adj1 = context.main_df2.index - context.output.index
        if len(adj1) > 0:
            for i in range(len(adj1)):
                # NOTE(review): DataFrame.drop is not in-place and the result
                # is discarded, so this loop has no effect — confirm intent.
                context.main_df2.drop(context.main_df2.index[context.main_df2.index.get_loc(adj1[i])])
        #add to df new in output else and build history
        adj2 = context.output.index - context.main_df2.index #new stock not current in main_df2
        if len(adj2) > 0:
            for i in range(len(adj2)):
                # NOTE(review): result discarded here as well — likely a no-op.
                context.main_df2.drop(context.main_df2.index[context.main_df2.index.get_loc(adj2[i])])
            context.main_df2 = context.main_df2.drop(context.main_df2.index[[adj2]]) #remove all from first
            context.main_df2b = pd.DataFrame()
            low = data.history(sid(8554), 'low', 24, '1d')
            for i in low.index:
                x = abs(22 - len(context.main_df2b))
                low = data.history(adj2, 'low', 23 + x, '1d')
                close = data.history(adj2, 'close', 23 + x + 1, '1d')
                highest_close = nanmax(close[:22], axis=0)
                SynVIX = ((highest_close - low[20:22].shift(1)[-1:]) / (highest_close)) * 100
                context.main_df2b = context.main_df2b.append(SynVIX[adj2], ignore_index=False).drop_duplicates()
            context.main_df2 = pd.concat([context.main_df2, context.main_df2b.T], axis=1)
# also mock out the Log object
class log(object):
    """Minimal stand-in for Quantopian's `log` facility: prints to stdout.

    Fix: the Python-2-only ``print output`` statement is replaced with the
    call form ``print(output)``, which behaves identically for a single
    argument under both Python 2 and Python 3.
    """
    @staticmethod
    def info(output):
        print(output)
Redefining initialize — note that we also limit the number of equities that would be added.
from zipline.finance import commission
def create_pipeline(top_size=100):
    """
    Build the stock-selection pipeline: tradability screen plus a dollar-volume
    cut, exposing the WVIXHIST factor as 'close' and a 10-day SMA as 'sma'.

    Fix: the exchange filter `xch` combined the NAS/NYS comparisons with the
    Python `or` keyword, which does not perform an element-wise union of
    pipeline Filters; replaced with the `|` operator.

    NOTE(review): `top_size` is accepted (and passed as 20 by the caller
    below) but never used anywhere in this function — the universe is not
    actually limited.  Left unchanged to preserve behavior; confirm intent.
    """
    pipe = Pipeline()
    # Tradability screen — mirrors the one in initialize() above.
    country_ref = mstar.company_reference.country_id.latest.eq('USA')
    have_data = mstar.valuation.market_cap.latest.notnull()
    mkt_cap_thresh = (mstar.valuation.market_cap.latest > 30e6)
    sector_check = Sector().notnull()
    xch = (mstar.company_reference.primary_exchange_id.latest.eq('NAS') |
           mstar.company_reference.primary_exchange_id.latest.eq('NYS'))
    common_stock = mstar.share_class_reference.security_type.latest.eq('ST00000001')
    not_lp_name = ~mstar.company_reference.standard_name.latest.matches('.* L[\\. ]?P\.?$')
    not_lp_balance_sheet = mstar.balance_sheet.limited_partnership.latest.isnull()
    not_otc = ~mstar.share_class_reference.exchange_id.latest.startswith('OTC')
    not_wi = ~mstar.share_class_reference.symbol.latest.endswith('.WI')
    not_depository = ~mstar.share_class_reference.is_depositary_receipt.latest
    primary_share = IsPrimaryShare()
    sma_1 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=5)
    min_price = (sma_1 > 10)
    # initialize filter/mask ...just leave in in case expand testing set
    tradable_filter = (country_ref & mkt_cap_thresh & sector_check & xch &
                       common_stock & not_lp_name & not_lp_balance_sheet &
                       have_data & not_otc & not_wi & not_depository & primary_share & min_price)
    # lets screen down to the stocks that have averaged
    # at least $100M of daily trading dollar volume over the
    # past XX days.
    # Create a dollar volume factor.
    dollar_volume = AverageDollarVolume(window_length=5)
    high_dollar_volume = dollar_volume.percentile_between(50, 100)
    high_cap_high_dv = high_dollar_volume & tradable_filter #USE FOR PIPELINE STOCKS
    #high_cap_high_dv = context.include_filter #USE IF WANT TO USE SPECIFIC STOCKS
    # Add the two factors defined to the pipeline
    sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
    close = WVIXHIST(mask=high_cap_high_dv)
    # Add the desired values to our pipe.
    pipe.add(close, 'close')
    pipe.add(sma, 'sma')
    pipe.set_screen(high_cap_high_dv)
    return pipe
# NOTE: redefined from above
def initialize_fixed(context):
    """
    Replacement for `initialize`: identical schedules and state, but the
    pipeline construction is delegated to create_pipeline().
    """
    # Per-run state frames and counters.
    context.main_df2 = pd.DataFrame()
    context.main_df2a = pd.DataFrame()
    context.sma_quotientl = pd.DataFrame()
    context.sma_quotients = pd.DataFrame()
    #context.sma_quotient = pd.DataFrame({'Date': [],'Asset': [], 'sma_quotient': []})
    #context.sma_quotient = pd.DataFrame()
    context.max_lvrg = 0
    context.main_df2b = pd.DataFrame()
    # see below for fix on pipeline
    context.last_pipeline_index = 0
    # Build and register the stock-selection pipeline.
    attach_pipeline(create_pipeline(), 'example')
    # Daily schedules: flatten near the close, order after the open,
    # rebuild history at the open, clear cached frames weekly.
    schedule_function(func=close_positions,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_close(minutes=15))
    schedule_function(order_positions,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open(minutes=5))
    schedule_function(attempt2,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open())
    schedule_function(cleardf,
                      date_rule=date_rules.week_end(),
                      time_rule=time_rules.market_close(minutes=3))
    # Fire the stop-loss sweep on minutes 1, 31, 61, ... of the 390-minute session.
    session_minutes = 6*60 + 30
    for minute in range(session_minutes):
        if minute % 30 == 1:
            schedule_function(stop_loss,
                              date_rule=date_rules.every_day(),
                              time_rule=time_rules.market_open(minutes=minute))
# Pre-compute the pipeline output in the research environment — run_pipeline
# only exists on Quantopian research, not inside a zipline backtest, which is
# why the results are cached in `pipeline_data` for the algorithm to read.
pipe = create_pipeline(top_size=20)
from quantopian.research import run_pipeline
pipeline_data = run_pipeline(pipe,
                             start_date=start_date.isoformat(),
                             end_date=end_date.isoformat()
                             )
# Notebook display of the resulting multi-indexed frame.
pipeline_data
Let's get the data we need to run the backtest
# Dead code kept for reference: universe sanity-check and symbol exclusion.
'''
# we should narrow down to about top 1000 equities
universe = list(set(pipeline_data.index.get_level_values(1)))
assert len(universe) < 5000, "Universe is too big: {}".format(len(universe))
len(universe)
'''
'''
# ambigious Ambiguous ownership for 1 symbol, multiple assets held the following symbols:
# AGN: intersections: (('2002-01-01 00:00:00', '2015-03-16 00:00:00'),)
EXCLUDE_STOCKS = {} #{'AGN'}
universe = [ s for s in universe if s.symbol not in EXCLUDE_STOCKS ]
'''
# https://www.quantopian.com/help#quantopian_research_get_pricing
# Fetch the price history that TradingAlgorithm.run() below consumes.
# NOTE(review): `get_pricing` exists only in the Quantopian research
# environment, and `universe` is defined only inside the commented-out block
# above — this cell assumes that block was executed first; confirm.
data = get_pricing(universe,
                   start_date=start_date,
                   end_date=end_date,
                   #frequency='minute',
                   symbol_reference_date=start_date,
                   fields=['price', 'close_price'])
Hmm... we get an exception when running the backtest:
NoEngineRegistered: Attempted to run a pipeline but no pipeline resources were registered
raised from before_trading_start,
so we use the existing pipeline data¶def before_trading_start_fixed(context, data):
# Call pipelive_output to get the output
#context.output = pipeline_output('ranked_2000')
global pipeline_data
try:
t = get_datetime().date() + timedelta(days=1)
context.outputa = pipeline_data.loc[t]
context.last_pipeline_index = pipeline_data.index.get_loc(t)
except KeyError as e:
# if we cannot find the specific date in pipeline, use last date
context.outputa = pipeline_data.irow(context.last_pipeline_index).reset_index(level=0, drop=True)
# Narrow down the securities to only the top 200 & update my universe
context.output = context.outputa.close
#number_of_stocks = min(len(context.output)/2-1, 100)
#context.long_list = context.output.sort(['combo_rank'], ascending=False).iloc[:number_of_stocks]
#context.short_list = context.output.sort(['combo_rank'], ascending=False).iloc[-number_of_stocks:]
#print number_of_stocks
# Dead code kept for reference: first TradingAlgorithm attempt using
# initialize_fixed; superseded by the live cell below.
'''
from zipline import TradingAlgorithm
import datetime
import time
from datetime import timedelta
from zipline.api import get_open_orders
#http://www.zipline.io/appendix.html
algo_obj = TradingAlgorithm(
    initialize=initialize_fixed,
    #handle_data=handle_data,
    before_trading_start=before_trading_start_fixed,
    start=start_date,
    data_frequency='daily',
    end=end_date,
    #get_pipeline_loader=run_pipeline
    )
# Run algorithms
results = algo_obj.run(
    data#data.transpose(2,1,0),
    #overwrite_sim_params=True
    )
'''
# Live run: build the algorithm with the original `initialize` (which attaches
# the pipeline itself) and the fixed before_trading_start that reads the
# pre-computed `pipeline_data` instead of calling pipeline_output().
from zipline import TradingAlgorithm
import datetime
import time
from datetime import timedelta
from zipline.api import get_open_orders
#http://www.zipline.io/appendix.html
algo_obj = TradingAlgorithm(
    initialize=initialize,
    #handle_data=handle_data,
    before_trading_start=before_trading_start_fixed,
    start=start_date,
    data_frequency='daily',
    end=end_date,
    # get_pipeline_loader would register a pipeline engine — the missing
    # piece behind the NoEngineRegistered error; left disabled here.
    #get_pipeline_loader=run_pipeline
    )
# Run algorithms
results = algo_obj.run(
    data#data.transpose(2,1,0),
    #overwrite_sim_params=True
    )
Check with tearsheet data on the algorithm
Using pyfolio - https://quantopian.github.io/pyfolio/
Examples - https://github.com/quantopian/pyfolio/blob/master/pyfolio/examples/zipline_algo_example.ipynb
#pipe = create_pipeline(top_size=20)
#pipe.show_graph(format='png')
# Tear-sheet analysis of the backtest results with pyfolio.
import pyfolio as pf
# NOTE(review): the `gross_lev` parameter was removed from
# create_full_tear_sheet in later pyfolio releases — confirm against the
# installed version before running.
returns, positions, transactions, gross_lev = pf.utils.extract_rets_pos_txn_from_zipline(results)
pf.create_full_tear_sheet(returns, positions=positions,
                          transactions=transactions,
                          gross_lev=gross_lev
                          )