Steven,
Here are a few improvements that I've found. It seems that in initialize, you could combine all the screens in the mask for top_sentiment
like so:
def make_pipeline(context):
    """Build the daily pipeline: liquid earnings-surprise longs, screened
    down to the top 15% of volatility-weighted sentiment scores."""
    pipe = Pipeline()
    # Instantiate our factors.
    factor = PercentSurprise()
    weighted_sentiment = WeightedSentimentByVolatility()
    # Screen out penny stocks and low-liquidity securities.
    dollar_volume = AverageDollarVolume(window_length=20)
    is_liquid = dollar_volume > 10**7
    # Longs: percent surprise inside the [min_surprise, max_surprise] band.
    longs = (factor >= context.min_surprise) & (factor <= context.max_surprise)
    # Expose the long filter and business-days-since-earnings column
    # so before_trading_start can filter on them.
    pipe.add(longs, "longs")
    pipe.add(BusinessDaysSincePreviousEarnings(), 'pe')
    # Single combined screen: liquid, in the surprise band, non-zero
    # sentiment — then keep only the 85th-100th sentiment percentile.
    base_universe = is_liquid & longs & (weighted_sentiment != 0)
    top_sentiment = weighted_sentiment.percentile_between(85, 100, mask=(base_universe))
    pipe.set_screen(top_sentiment)
    return pipe
Also in order_positions
, it looks like the exit logic I had used before wasn't performing as expected. This seems closer to what you'd want to use:
# Check whether we've exited our positions; if we haven't, exit the
# remaining securities that we have left.
for security in port:
    if data.can_trade(security):
        if context.stocks_held.get(security) is not None:
            # Bump the day counter; exit once the holding period is reached.
            context.stocks_held[security] += 1
            if context.stocks_held[security] >= context.days_to_hold:
                order_target_percent(security, 0)
                del context.stocks_held[security]
        # Still in the portfolio but no longer tracked: a previous exit
        # order must not have filled, so try exiting again.
        else:
            log.info("Haven't yet exited %s, ordering again" % security.symbol)
            order_target_percent(security, 0)
It also seems like running handle_data
every minute is going to slow down your algorithm a lot and will lead to the short
positions that you're seeing because right now, there isn't a check for open orders, so you could be placing 2 subsequent orders for the same security because it met your stop
conditions. You can fix that by adding a quick if not get_open_orders(security)
. For speed improvements, I'd suggest moving this to a schedule_function method.
Here's the full example:
"""
This is a PEAD strategy based off Estimize's earnings estimates. Estimize
is a service that aggregates financial estimates from independent, buy-side,
sell-side analysts as well as students and professors. You can run this
algorithm yourself by getting the free sample version of Estimize's consensus
dataset and EventVestor's Earnings Calendar Dataset at:
- https://www.quantopian.com/data/eventvestor/earnings_calendar
- https://www.quantopian.com/data/estimize/revisions
Many of these variables are meant for you to play around with:
1. context.days_to_hold: defines the number of days you want to hold before exiting a position
2. context.min/max_surprise: defines the min/max % surprise you want before trading on a signal
"""
import numpy as np
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import CustomFactor, AverageDollarVolume
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline.data.accern import alphaone as alphaone
from quantopian.pipeline.data.estimize import (
ConsensusEstimizeEPS,
ConsensusWallstreetEPS,
ConsensusEstimizeRevenue,
ConsensusWallstreetRevenue
)
# The sample and full version is found through the same namespace
# https://www.quantopian.com/data/eventvestor/earnings_calendar
# Sample date ranges: 01 Jan 2007 - 10 Feb 2014
from quantopian.pipeline.data.eventvestor import EarningsCalendar
from quantopian.pipeline.factors.eventvestor import (
BusinessDaysUntilNextEarnings,
BusinessDaysSincePreviousEarnings
)
# Custom factor: EPS percent surprise vs. the Estimize consensus.
# (The original comments said "market cap" — a copy-paste leftover.)
class PercentSurprise(CustomFactor):
    """(actual EPS - Estimize consensus EPS) / consensus EPS for the
    most recent report (window_length=1 → yesterday's values)."""
    window_length = 1
    inputs = [ConsensusEstimizeEPS.previous_actual_value,
              ConsensusEstimizeEPS.previous_mean]

    # Compute the percent surprise for every asset in the universe.
    def compute(self, today, assets, out, actual_eps, estimize_eps):
        # NOTE(review): a consensus of exactly 0 yields inf/NaN here;
        # such values fail the min/max surprise screen in make_pipeline,
        # so they are filtered out downstream. The original "+ 0" in the
        # divisor was a no-op and has been removed.
        out[:] = (actual_eps[-1] - estimize_eps[-1]) / estimize_eps[-1]
# NOTE(review): disabled alternative sentiment factor, kept for reference.
# It is wrapped in a module-level string so it never executes.
"""
class DailySentimentByImpactScore(CustomFactor):
# Economic Hypothesis: Accern reports both an `impact score`
# and `article sentiment`. The `impact score` is used to measure
# the likelihood that a security's price changes by more than 1%
# in the following day. The `article sentiment` is a quantified daily
# measure of news & blog sentiment about a given security. This combined
# measure of `impact score` and `article sentiment` may hold information
# about price changes in the following day.
inputs = [alphaone.article_sentiment, alphaone.impact_score]
window_length = 1
def compute(self, today, assets, out, sentiment, impact_score):
out[:] = sentiment * impact_score
"""
class WeightedSentimentByVolatility(CustomFactor):
    """Mean daily sentiment weighted by its volatility.

    Economic hypothesis: sentiment volatility can indicate that public
    news about a security is changing rapidly, so a high level of
    sentiment volatility may signal a change in price momentum. We
    therefore scale the average sentiment over the window by its
    standard deviation.
    """
    inputs = [alphaone.article_sentiment]
    window_length = 2

    def compute(self, today, assets, out, sentiment):
        # NaN-safe statistics over the 2-day window, per asset.
        average = np.nanmean(sentiment, axis=0)
        volatility = np.nanstd(sentiment, axis=0)
        out[:] = average * volatility
def make_pipeline(context):
    """Build the daily pipeline: liquid earnings-surprise longs, screened
    down to the top 15% of volatility-weighted sentiment scores."""
    pipe = Pipeline()
    # Instantiate our factors.
    factor = PercentSurprise()
    weighted_sentiment = WeightedSentimentByVolatility()
    # Screen out penny stocks and low-liquidity securities.
    dollar_volume = AverageDollarVolume(window_length=20)
    is_liquid = dollar_volume > 10**7
    # Longs: percent surprise inside the [min_surprise, max_surprise] band.
    longs = (factor >= context.min_surprise) & (factor <= context.max_surprise)
    # Expose the long filter and business-days-since-earnings column
    # so before_trading_start can filter on them.
    pipe.add(longs, "longs")
    pipe.add(BusinessDaysSincePreviousEarnings(), 'pe')
    # Single combined screen: liquid, in the surprise band, non-zero
    # sentiment — then keep only the 85th-100th sentiment percentile.
    base_universe = is_liquid & longs & (weighted_sentiment != 0)
    top_sentiment = weighted_sentiment.percentile_between(85, 100, mask=(base_universe))
    pipe.set_screen(top_sentiment)
    return pipe
def initialize(context):
    """Set up costs, state, the pipeline, and the daily schedule."""
    # Set commissions and slippage to 0 to measure pure alpha.
    set_commission(commission.PerShare(cost=0, min_trade_cost=0))
    set_slippage(slippage.FixedSlippage(spread=0))
    set_long_only()
    # Number of days to hold before exiting a position (tune as desired).
    context.days_to_hold = 5
    # Tracks which stocks we hold and for how many days: dict[stock: days_held].
    context.stocks_held = {}
    # NOTE(review): stocks_exited is never read or updated anywhere else
    # in this file — it looks like dead state; confirm before removing.
    context.stocks_exited = {}
    # Minimum/maximum magnitude of percent surprise to trade on.
    context.min_surprise = .00
    context.max_surprise = .04
    # Build and attach our pipeline.
    attach_pipeline(make_pipeline(context), 'estimize')
    # Log our positions 30 minutes before the close.
    schedule_function(func=log_positions,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_close(minutes=30))
    # Place the daily rebalance orders at the open.
    schedule_function(func=order_positions,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open())
    # Take-profit / stop-loss checks 60 minutes after the open.
    schedule_function(func=extra_orders,
                      date_rule=date_rules.every_day(),
                      time_rule=time_rules.market_open(minutes=60))
def before_trading_start(context, data):
    """Each morning, keep only securities whose earnings release was
    exactly 1 business day ago and store them as today's candidates."""
    output = pipeline_output('estimize')
    reported_yesterday = output[output['pe'] == 1]
    context.positive_surprise = reported_yesterday.index
def log_positions(context, data):
    """Log every non-zero position (scheduled 30 minutes before the close)."""
    positions = context.portfolio.positions
    if len(positions) > 0:
        held = [
            "%s at %s shares, " % (stock.symbol, positions[stock].amount)
            for stock in positions
            if positions[stock].amount != 0
        ]
        header = "Current positions for %s : " % (str(get_datetime()))
        log.info(header + "".join(held))
def order_positions(context, data):
    """Daily rolling rebalance (scheduled at market open).

    First exits any position held for ``context.days_to_hold`` days (and
    re-orders exits that did not fill), then places equal-weight orders
    across today's positive-surprise names plus positions still held.
    """
    port = context.portfolio.positions

    # Check if we've exited our positions and if we haven't, exit the
    # remaining securities that we have left.
    for security in port:
        if not data.can_trade(security):
            continue
        days_held = context.stocks_held.get(security)
        if days_held is not None:
            days_held += 1
            context.stocks_held[security] = days_held
            if days_held >= context.days_to_hold:
                order_target_percent(security, 0)
                del context.stocks_held[security]
        else:
            # In the portfolio but no longer tracked: a previous exit
            # order hasn't filled, so try exiting again.
            log.info("Haven't yet exited %s, ordering again" % security.symbol)
            order_target_percent(security, 0)

    # Current tracked long positions plus today's new signals.
    current_positive_pos = [pos for pos in port
                            if port[pos].amount > 0 and pos in context.stocks_held]
    positive_stocks = context.positive_surprise.tolist() + current_positive_pos

    # Rebalance to equal weight (95% invested) across all positive names.
    for security in positive_stocks:
        days_held = context.stocks_held.get(security)
        # BUG FIX: the original evaluated `get(...) <= days_to_hold` BEFORE
        # the `is None` check, which raises TypeError on Python 3 whenever
        # the security is not yet tracked. Check None first (same truth
        # value on Python 2, correct on Python 3) and call .get() once.
        still_holdable = days_held is None or days_held <= context.days_to_hold
        if data.can_trade(security) and still_holdable:
            order_target_percent(security, 0.95 / len(positive_stocks))
            if days_held is None:
                # Start the holding-period counter for new entries.
                context.stocks_held[security] = 0
def extra_orders(context, data):
    """Intraday take-profit / stop-loss pass (scheduled 60 min after open).

    Sells a tracked position once price moves +6% (profit) or -8% (loss)
    from cost basis. Securities with open orders are skipped so we never
    stack a second exit order on top of an unfilled one.
    """
    for security in context.portfolio.positions:
        days_held = context.stocks_held.get(security)
        # BUG FIX: the original compared `get(...) >= 1` directly, which
        # raises TypeError on Python 3 when the security is untracked
        # (get returns None). Guard the None case explicitly.
        if days_held is None or days_held < 1:
            continue
        if not data.can_trade(security) or get_open_orders(security):
            continue
        current_position = context.portfolio.positions[security].amount
        if current_position <= 0:
            continue
        cost_basis = context.portfolio.positions[security].cost_basis
        price = data.current(security, 'price')
        limit = cost_basis * 1.06
        stop = cost_basis * 0.92
        # BUG FIX: elif instead of two independent ifs — the original could
        # run both branches and `del` the same key twice (KeyError) if both
        # conditions ever held.
        if price >= limit:
            order_target_percent(security, 0)
            log.info(str(security) + ' Sold for Profit')
            del context.stocks_held[security]
        elif price <= stop:
            order_target_percent(security, 0)
            log.info(str(security) + ' Sold for Loss')
            del context.stocks_held[security]
I'm not sure how it will affect results, but I think it'll provide a good starting point for you to improve on.