Notebook
In [16]:
# Concise Checker

# Replace the string with your backtest ID (the tail end of your backtest's URL).
bt = get_backtest('5ac354b804eeea42662c8c66')

# Note: Backtest needs to be longer than 2 years in order to receive a score.
100% Time: 0:00:02|###########################################################|
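The argument to get_backtest is the backtest ID, i.e. the trailing hexadecimal segment of your backtest's URL. A minimal sketch of slicing the ID off a full URL string (the URL shape below is only a placeholder, not a documented format):

# Hypothetical example: take the ID from the tail end of a backtest URL.
url = 'https://www.quantopian.com/research/backtests/5ac354b804eeea42662c8c66'  # placeholder URL
backtest_id = url.rstrip('/').split('/')[-1]
bt = get_backtest(backtest_id)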
In [18]:
'''

Original source: https://www.quantopian.com/posts/long-short-equity-with-risk-model 
This source: https://www.quantopian.com/posts/contest-constraint-check-notebook-with-compact-output

Example output ...

Results: 2011-01-04 to 2014-11-21
Score                     4.5065  Constraints met 8/9
Returns                   109.8%  PASS: Positive
Positions              4.47|5.43  PASS: Max position concentration 4.47% <= 5.0%
Leverage     0.74|0.94|1.06|1.17  PASS: Leverage range 0.94x-1.06x between 0.8x-1.1x
Turnover         2.6|2.9|5.7|6.7  FAIL: 2nd percentile turnover 2.9% < 5.0%
Net exposure             2.6|4.9  PASS: Net exposure (absolute value) 2.6% <= 10.0%
Beta-to-SPY            0.12|0.14  PASS: Beta 0.12 between +/-0.30
Sectors                0.18|0.18  PASS: All sector exposures between +/-0.20
Style                  0.37|0.37  PASS: All style exposures between +/-0.40
Tradable                  96|100  PASS: Investment in QTradableStocksUS >= 95.0%
'''
In [11]:
import empyrical as ep
import pyfolio   as pf
import numpy     as np
import pandas    as pd
from matplotlib import pyplot as plt
from quantopian.research import returns
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.filters import QTradableStocksUS
In [12]:
def get_tradable_universe(start, end):
    """
    Gets the tradable universe in a format that can be compared to the positions
    of a backtest.
    """
    pipe = Pipeline(
        columns={'qtu':QTradableStocksUS()}
    )
    df = run_pipeline(pipe, start, end)
    df = df.unstack()
    df.columns = df.columns.droplevel()
    df = df.astype(float).replace(0, np.nan)
    return df

def volatility_adjusted_daily_return(trailing_algorithm_returns):
    """
    Normalize the last daily return in `trailing_algorithm_returns` by the annualized
    volatility of `trailing_algorithm_returns`.
    """
    
    todays_return = trailing_algorithm_returns[-1]
    # Volatility is floored at 2%.
    volatility = max(ep.annual_volatility(trailing_algorithm_returns), 0.02)
    score = (todays_return / volatility)
    
    return score

def compute_score(algorithm_returns):
    """
    Compute the score of a backtest from its algorithm_returns.
    """
    
    daily_scores = roll(
        algorithm_returns,
        function=volatility_adjusted_daily_return,
        window=63
    )
    
    # Skip the first 441 rolling values so scoring starts after roughly
    # two years (441 + 63 = 504 trading days) of the backtest.
    cumulative_score = np.cumsum(daily_scores[441:])
    latest_score = cumulative_score[-1]
    
    print ''
    print 'Score computed between %s and %s.' % (cumulative_score.index[0].date(), daily_scores.index[-1].date())
    
    plt.plot(cumulative_score)
    plt.title('Out-of-Sample Score Over Time')
    print 'Cumulative Score: %f' % latest_score
    
    return cumulative_score

def score(algorithm_returns):
    daily_scores = roll(
        algorithm_returns,
        function=volatility_adjusted_daily_return,
        window=63
    )
    return np.cumsum(daily_scores[441:])[-1]
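For intuition on the scoring arithmetic: a +0.5% day against a trailing window with roughly 10% annualized volatility contributes about 0.005 / 0.10 = 0.05 to the cumulative score. A quick sketch on synthetic data (illustration only, not part of the checker):

# Illustration: synthetic 63-day window with ~10% annualized volatility.
np.random.seed(1)
example_window = np.random.normal(0.0, 0.10 / np.sqrt(252), 63)
example_window[-1] = 0.005                                # today's return: +0.5%
print volatility_adjusted_daily_return(example_window)    # roughly 0.05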
In [13]:
# This code is copied from the empyrical repository.
# Source: https://github.com/quantopian/empyrical/blob/master/empyrical/utils.py#L49
# Includes a fix to the bug reported here: https://github.com/quantopian/empyrical/issues/79
def roll(*args, **kwargs):
    """
    Calculates a given statistic across a rolling time period.
    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns (optional): float / series
        Benchmark return to compare returns against.
    function:
        the function to run for each rolling window.
    window (keyword): int
        the number of periods included in each calculation.
    (other keywords): other keywords that are required to be passed to the
        function in the 'function' argument may also be passed in.
    Returns
    -------
    np.ndarray, pd.Series
        depends on input type
        ndarray(s) ==> ndarray
        Series(s) ==> pd.Series
        A Series or ndarray of the results of the stat across the rolling
        window.
    """
    func = kwargs.pop('function')
    window = kwargs.pop('window')
    if len(args) > 2:
        raise ValueError("Cannot pass more than 2 return sets")

    if len(args) == 2:
        if not isinstance(args[0], type(args[1])):
            raise ValueError("The two returns arguments are not the same.")

    if isinstance(args[0], np.ndarray):
        return _roll_ndarray(func, window, *args, **kwargs)
    return _roll_pandas(func, window, *args, **kwargs)

def _roll_ndarray(func, window, *args, **kwargs):
    data = []
    for i in range(window, len(args[0]) + 1):
        rets = [s[i-window:i] for s in args]
        data.append(func(*rets, **kwargs))
    return np.array(data)


def _roll_pandas(func, window, *args, **kwargs):
    data = {}
    for i in range(window, len(args[0]) + 1):
        rets = [s.iloc[i-window:i] for s in args]
        data[args[0].index[i - 1]] = func(*rets, **kwargs)
    return pd.Series(data)
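As a sanity check, the rolling machinery can be exercised end to end on synthetic returns. A standalone sketch, assuming the roll() and volatility_adjusted_daily_return() definitions above (the series below is random, not a real backtest):

# Synthetic returns: ~3 years of business days.
np.random.seed(42)
dates        = pd.date_range('2011-01-03', periods=756, freq='B')
fake_returns = pd.Series(np.random.normal(0.0004, 0.01, len(dates)), index=dates)

fake_daily_scores = roll(fake_returns, function=volatility_adjusted_daily_return, window=63)
# As in compute_score(): skip the first 441 rolling values so scoring starts ~504 trading days in.
print 'Synthetic cumulative score: %.4f' % np.cumsum(fake_daily_scores[441:])[-1]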
In [14]:
SECTORS = [
    'basic_materials', 'consumer_cyclical', 'financial_services',
    'real_estate', 'consumer_defensive', 'health_care', 'utilities',
    'communication_services', 'energy', 'industrials', 'technology'
]

STYLES = [
    'momentum', 'size', 'value', 'short_term_reversal', 'volatility'
]

POSITION_CONCENTRATION_98TH_MAX  =  .05
POSITION_CONCENTRATION_100TH_MAX =  .1
DAILY_TURNOVER_0TH_MIN           =  .03
DAILY_TURNOVER_2ND_MIN           =  .05
DAILY_TURNOVER_98TH_MAX          =  .65
DAILY_TURNOVER_100TH_MAX         =  .8
NET_EXPOSURE_LIMIT_98TH_MAX      =  .1
NET_EXPOSURE_LIMIT_100TH_MAX     =  .2
BETA_TO_SPY_98TH_MAX             =  .3
BETA_TO_SPY_100TH_MAX            =  .4
SECTOR_EXPOSURE_98TH_MAX         =  .2
SECTOR_EXPOSURE_100TH_MAX        =  .25
STYLE_EXPOSURE_98TH_MAX          =  .4
STYLE_EXPOSURE_100TH_MAX         =  .5
TRADABLE_UNIVERSE_0TH_MIN        =  .9
TRADABLE_UNIVERSE_2ND_MIN        =  .95
LEVERAGE_0TH_MIN                 =  .7
LEVERAGE_2ND_MIN                 =  .8
LEVERAGE_98TH_MAX                = 1.1
LEVERAGE_100TH_MAX               = 1.2

def check_constraints(positions, transactions, algorithm_returns, risk_exposures):
    position_concentration_98    = 0
    position_concentration_100   = 0
    leverage_0                   = 0
    leverage_2                   = 0
    leverage_98                  = 0
    leverage_100                 = 0
    rolling_mean_turnover_0      = 0
    rolling_mean_turnover_2      = 0
    rolling_mean_turnover_98     = 0
    rolling_mean_turnover_100    = 0
    net_exposure_98              = 0
    net_exposure_100             = 0
    beta_98                      = 0
    beta_100                     = 0
    abs_mean_sector_exposure_98  = 0
    abs_mean_sector_exposure_100 = 0
    abs_mean_style_exposure_98   = 0
    abs_mean_style_exposure_100  = 0
    percent_in_qtu_0             = 0
    percent_in_qtu_2             = 0
    cumulative_algorithm_returns = 0
    constraints_met              = 0
    num_constraints              = 9
    sector_constraints           = True
    style_constraints            = True
    pconc_result = lv_result = trnovr_result = netexp_result = beta_result = sctr_result = style_result = trading_result = retrns_result = ''

    # Position Concentration Constraint
    try:
        percent_allocations = pf.pos.get_percent_alloc(positions[5:])
        daily_absolute_percent_allocations = percent_allocations.abs().drop('cash', axis=1)
        daily_max_absolute_position        = daily_absolute_percent_allocations.max(axis=1)

        position_concentration_98  = daily_max_absolute_position.quantile(0.98)
        position_concentration_100 = daily_max_absolute_position.max()
    except IndexError:
        position_concentration_98  = -1
        position_concentration_100 = -1

    if (position_concentration_98 > POSITION_CONCENTRATION_98TH_MAX):
        pconc_result = 'FAIL: 98th percentile position concentration %.2f%% > %.1f%%' % (
            position_concentration_98*100,
            POSITION_CONCENTRATION_98TH_MAX*100
        )
    elif (position_concentration_100 > POSITION_CONCENTRATION_100TH_MAX):
        pconc_result = 'FAIL: 100th percentile position concentration %.2f%% > %.1f%%' % (
            position_concentration_100*100,
            POSITION_CONCENTRATION_100TH_MAX*100
        )
    else:
        pconc_result = 'PASS: Max position concentration %.2f%% <= %.1f%%' % (
            position_concentration_98*100,
            POSITION_CONCENTRATION_98TH_MAX*100
        )
        constraints_met += 1

    # Leverage Constraint
    leverage        = pf.timeseries.gross_lev(positions[5:])
    leverage_0      = leverage.min()
    leverage_2      = leverage.quantile(0.02)
    leverage_98     = leverage.quantile(0.98)
    leverage_100    = leverage.max()
    leverage_passed = True

    if (leverage_0 < LEVERAGE_0TH_MIN):
        lv_result = 'FAIL: Minimum leverage %.2fx < %.1fx' % (
            leverage_0,
            LEVERAGE_0TH_MIN
        )
        leverage_passed = False
    if (leverage_2 < LEVERAGE_2ND_MIN):
        lv_result = 'FAIL: 2nd percentile leverage %.2fx < %.1fx' % (
            leverage_2,
            LEVERAGE_2ND_MIN
        )
        leverage_passed = False
    if (leverage_98 > LEVERAGE_98TH_MAX):
        lv_result = 'FAIL: 98th percentile leverage %.2fx > %.1fx' % (
            leverage_98,
            LEVERAGE_98TH_MAX
        )
        leverage_passed = False
    if (leverage_100 > LEVERAGE_100TH_MAX):
        lv_result = 'FAIL: Maximum leverage %.2fx > %.1fx' % (
            leverage_100,
            LEVERAGE_100TH_MAX
        )
        leverage_passed = False
    if leverage_passed:
        lv_result = 'PASS: Leverage range %.2fx-%.2fx between %.1fx-%.1fx' % (
            leverage_2,
            leverage_98,
            LEVERAGE_2ND_MIN,
            LEVERAGE_98TH_MAX
        )
        constraints_met += 1

    # Turnover Constraint
    turnover = pf.txn.get_turnover(positions, transactions, denominator='portfolio_value')
    # Compute mean rolling 63 trading day turnover.
    rolling_mean_turnover = roll(
        turnover,
        function=pd.Series.mean,
        window=63)[62:]
    rolling_mean_turnover_0      = rolling_mean_turnover.min()
    rolling_mean_turnover_2      = rolling_mean_turnover.quantile(0.02)
    rolling_mean_turnover_98     = rolling_mean_turnover.quantile(0.98)
    rolling_mean_turnover_100    = rolling_mean_turnover.max()
    rolling_mean_turnover_passed = True

    if (rolling_mean_turnover_0 < DAILY_TURNOVER_0TH_MIN):
        trnovr_result = 'FAIL: Minimum turnover %.1f%% < %.1f%%' % (
            rolling_mean_turnover_0*100,
            DAILY_TURNOVER_0TH_MIN*100
        )
        rolling_mean_turnover_passed = False
    if (rolling_mean_turnover_2 < DAILY_TURNOVER_2ND_MIN):
        trnovr_result = 'FAIL: 2nd percentile turnover %.1f%% < %.1f%%' % (
            rolling_mean_turnover_2*100,
            DAILY_TURNOVER_2ND_MIN*100
        )
        rolling_mean_turnover_passed = False
    if (rolling_mean_turnover_98 > DAILY_TURNOVER_98TH_MAX):
        trnovr_result = 'FAIL: 98th percentile turnover %.1f%% > %.1f%%' % (
            rolling_mean_turnover_98*100,
            DAILY_TURNOVER_98TH_MAX*100
        )
        rolling_mean_turnover_passed = False
    if (rolling_mean_turnover_100 > DAILY_TURNOVER_100TH_MAX):
        trnovr_result = 'FAIL: Maximum turnover %.1f%% > %.1f%%' % (
            rolling_mean_turnover_100*100,
            DAILY_TURNOVER_100TH_MAX*100
        )
        rolling_mean_turnover_passed = False
    if rolling_mean_turnover_passed:
        trnovr_result = 'PASS: Mean turnover range %.1f%%-%.1f%% between %.1f%%-%.1f%%' % (
            rolling_mean_turnover_2*100,
            rolling_mean_turnover_98*100,
            DAILY_TURNOVER_2ND_MIN*100,
            DAILY_TURNOVER_98TH_MAX*100
        )
        constraints_met += 1

    # Net Exposure Constraint
    net_exposure     = pf.pos.get_long_short_pos(positions[5:])['net exposure'].abs()
    net_exposure_98  = net_exposure.quantile(0.98)
    net_exposure_100 = net_exposure.max()

    if (net_exposure_98 > NET_EXPOSURE_LIMIT_98TH_MAX):
        netexp_result = 'FAIL: 98th percentile net exposure (absolute value) %.1f%% > %.1f%%' % (
            net_exposure_98*100,
            NET_EXPOSURE_LIMIT_98TH_MAX*100
        )
    elif (net_exposure_100 > NET_EXPOSURE_LIMIT_100TH_MAX):
        netexp_result = 'FAIL: 100th percentile net exposure (absolute value) %.1f%% > %.1f%%' % (
            net_exposure_100*100,
            NET_EXPOSURE_LIMIT_100TH_MAX*100
        )
    else:
        netexp_result = 'PASS: Net exposure (absolute value) %.1f%% <= %.1f%%' % (
            net_exposure_98*100,
            NET_EXPOSURE_LIMIT_98TH_MAX*100
        )
        constraints_met += 1

    # Beta Constraint
    spy_returns = returns(
        symbols('SPY'),
        algorithm_returns.index[0],
        algorithm_returns.index[-1],
    )
    beta = roll(
        algorithm_returns,
        spy_returns,
        function=ep.beta,
        window=126
    ).reindex_like(algorithm_returns).fillna(0).abs()
    beta_98  = beta.quantile(0.98)
    beta_100 = beta.max()
    if (beta_98 > BETA_TO_SPY_98TH_MAX):
        beta_result = 'FAIL: 98th percentile absolute beta %.2f > %.2f' % (
            beta_98,
            BETA_TO_SPY_98TH_MAX
        )
    elif (beta_100 > BETA_TO_SPY_100TH_MAX):
        beta_result = 'FAIL: 100th percentile absolute beta %.2f > %.2f' % (
            beta_100,
            BETA_TO_SPY_100TH_MAX
        )
    else:
        beta_result = 'PASS: Beta %.2f between +/-%.2f' % (
            beta_98,
            BETA_TO_SPY_98TH_MAX
        )
        constraints_met += 1

    # Risk Exposures
    rolling_mean_risk_exposures = risk_exposures.rolling(63, axis=0).mean()[62:].fillna(0)

    # Sector Exposures
    for sector in SECTORS:
        absolute_mean_sector_exposure = rolling_mean_risk_exposures[sector].abs()
        abs_mean_sector_exposure_98   = absolute_mean_sector_exposure.quantile(0.98)
        abs_mean_sector_exposure_100  = absolute_mean_sector_exposure.max()
        if (abs_mean_sector_exposure_98 > SECTOR_EXPOSURE_98TH_MAX):
            sctr_result = 'FAIL: 98th percentile %s exposure %.2f (absolute value) > %.2f' % (
                sector,
                abs_mean_sector_exposure_98,
                SECTOR_EXPOSURE_98TH_MAX
            )
            sector_constraints = False
        elif (abs_mean_sector_exposure_100 > SECTOR_EXPOSURE_100TH_MAX):
            max_sector_exposure_day = absolute_mean_sector_exposure.idxmax()
            sctr_result = 'FAIL: Max %s exposure %.2f (absolute value) on %s > %.2f' % (
                sector,
                abs_mean_sector_exposure_100,
                max_sector_exposure_day,
                SECTOR_EXPOSURE_100TH_MAX
            )
            sector_constraints = False
    if sector_constraints:
        sctr_result = 'PASS: All sector exposures between +/-%.2f' % SECTOR_EXPOSURE_98TH_MAX
        constraints_met += 1

    # Style Exposures
    for style in STYLES:
        absolute_mean_style_exposure = rolling_mean_risk_exposures[style].abs()
        abs_mean_style_exposure_98   = absolute_mean_style_exposure.quantile(0.98)
        abs_mean_style_exposure_100  = absolute_mean_style_exposure.max()
        if (abs_mean_style_exposure_98 > STYLE_EXPOSURE_98TH_MAX):
            style_result = 'FAIL: 98th percentile %s exposure %.3f (absolute value) > %.2f' % (
                style,
                abs_mean_style_exposure_98,
                STYLE_EXPOSURE_98TH_MAX
            )
            style_constraints = False
        elif (abs_mean_style_exposure_100 > STYLE_EXPOSURE_100TH_MAX):
            max_style_exposure_day = absolute_mean_style_exposure.idxmax()
            style_result = 'FAIL: Max %s exposure %.3f (absolute value) on %s > %.2f' % (
                style,
                abs_mean_style_exposure_100,
                max_style_exposure_day.date(),
                STYLE_EXPOSURE_100TH_MAX
            )
            style_constraints = False
    if style_constraints:
        style_result = 'PASS: All style exposures between +/-%.2f' % STYLE_EXPOSURE_98TH_MAX
        constraints_met += 1

    # Tradable Universe
    positions_wo_cash    = positions.drop('cash', axis=1)
    positions_wo_cash    = positions_wo_cash.abs()
    total_investment     = positions_wo_cash.fillna(0).sum(axis=1)
    daily_qtu_investment = universe.multiply(positions_wo_cash).fillna(0).sum(axis=1)
    percent_in_qtu       = daily_qtu_investment / total_investment
    percent_in_qtu       = percent_in_qtu[5:].fillna(0)
    percent_in_qtu_0     = percent_in_qtu.min()
    percent_in_qtu_2     = percent_in_qtu.quantile(0.02)

    if percent_in_qtu_0 < TRADABLE_UNIVERSE_0TH_MIN:
        min_percent_in_qtu_date = percent_in_qtu.idxmin()
        trading_result = 'FAIL: Min investment in QTradableStocksUS %.2f%% on %s < %.1f%%' % (
            percent_in_qtu_0*100,
            min_percent_in_qtu_date.date(),
            TRADABLE_UNIVERSE_0TH_MIN*100
        )
    elif percent_in_qtu_2 < TRADABLE_UNIVERSE_2ND_MIN:
        trading_result = 'FAIL: Investment in QTradableStocksUS (2nd percentile) %.2f%% < %.1f%%' % (
            percent_in_qtu_2*100,
            TRADABLE_UNIVERSE_2ND_MIN*100
        )
    else:
        trading_result = 'PASS: Investment in QTradableStocksUS >= %.1f%%' % (TRADABLE_UNIVERSE_2ND_MIN*100)
        constraints_met += 1

    # Total algorithm_returns Constraint
    cumulative_algorithm_returns = ep.cum_returns_final(algorithm_returns)
    if (cumulative_algorithm_returns > 0):
        retrns_result = 'PASS: Positive'
        constraints_met += 1
    else:
        retrns_result = 'FAIL: Cumulative algorithm_returns %.1f%% is negative' % (
            cumulative_algorithm_returns * 100
        )

    constraints_result = 'Constraints met {}/{}'.format(constraints_met, num_constraints)
    
    out = pd.DataFrame(index = range(10), columns = ['name', 'vals', 'highlight'])  # collect rows so the columns line up when printed
    out.iloc[0] = ['Score'  , '{}' .format('%.4f' % score(algorithm_returns)),           constraints_result]
    out.iloc[1] = ['Returns', '{}%'.format('%.1f' % (cumulative_algorithm_returns * 100)),   retrns_result ]
    out.iloc[2] = ['Positions', '{}|{}'       .format(
                    '%.2f' % (position_concentration_98  * 100),
                    '%.2f' % (position_concentration_100 * 100) ),    pconc_result ]
    out.iloc[3] = ['Leverage', '{}|{}|{}|{}'  .format(
                    '%.2f' % (leverage_0                      ),
                    '%.2f' % (leverage_2                      ),
                    '%.2f' % (leverage_98                     ),
                    '%.2f' % (leverage_100                    ) ),       lv_result ]
    out.iloc[4] = ['Turnover', '{}|{}|{}|{}'   .format(
                    '%.1f' % (rolling_mean_turnover_0    * 100),
                    '%.1f' % (rolling_mean_turnover_2    * 100),
                    '%.1f' % (rolling_mean_turnover_98   * 100),
                    '%.1f' % (rolling_mean_turnover_100  * 100) ),   trnovr_result ]
    out.iloc[5] = ['Net exposure', '{}|{}'     .format(
                    '%.1f' % (net_exposure_98            * 100),    
                    '%.1f' % (net_exposure_100           * 100) ),   netexp_result ]
    out.iloc[6] = ['Beta-to-SPY', '{}|{}'      .format(
                    '%.2f' % (beta_98                         ),    
                    '%.2f' % (beta_100                        ) ),     beta_result ]
    out.iloc[7] = ['Sectors', '{}|{}'          .format(
                    '%.2f' % (abs_mean_sector_exposure_98     ),    
                    '%.2f' % (abs_mean_sector_exposure_100    ) ),     sctr_result ]
    out.iloc[8] = ['Style', '{}|{}'            .format(
                    '%.2f' % (abs_mean_style_exposure_98      ),    
                    '%.2f' % (abs_mean_style_exposure_100     ) ),    style_result ]
    out.iloc[9] = ['Tradable', '{}|{}'         .format(
                    '%.0f' % (percent_in_qtu_0           * 100),    
                    '%.0f' % (percent_in_qtu_2           * 100) ), trading_result ]
    col1width = out.name.map(len).max()     # longest string like 'Net exposure'
    col2width = out.vals.map(len).max()     # longest sets of values

    print ''
    print 'Results: {} to {}'.format(positions.index[0].date(), positions.index[-1].date())
    for i in out.index:
        print '{} {}  {}'.format((out.iloc[i]['name']).ljust(col1width), (out.iloc[i]['vals']).rjust(col2width), out.iloc[i]['highlight'])
     
def evaluate_backtest(positions, transactions, algorithm_returns, risk_exposures):
    if len(positions.index) > 500:
        check_constraints(positions, transactions, algorithm_returns, risk_exposures)
        #compute_score(algorithm_returns[start:end])  # 2 output lines and a chart
    else:
        print 'ERROR: Backtest must be longer than 2 years to be evaluated'
        
# Transform some of the data
positions         = bt.pyfolio_positions
transactions      = bt.pyfolio_transactions
algorithm_returns = bt.daily_performance.returns
factor_exposures  = bt.factor_exposures
start             = positions.index[0]
end               = positions.index[-1]
universe          = get_tradable_universe(start, end)
universe.columns  = universe.columns.map(lambda x: '%s-%s' % (x.symbol, x.sid))

evaluate_backtest(positions, transactions, algorithm_returns, factor_exposures)
                  
Results: 2011-01-04 to 2014-11-21
Score                     4.5065  Constraints met 8/9
Returns                   109.8%  PASS: Positive
Positions              4.47|5.43  PASS: Max position concentration 4.47% <= 5.0%
Leverage     0.74|0.94|1.06|1.17  PASS: Leverage range 0.94x-1.06x between 0.8x-1.1x
Turnover         2.6|2.9|5.7|6.7  FAIL: 2nd percentile turnover 2.9% < 5.0%
Net exposure             2.6|4.9  PASS: Net exposure (absolute value) 2.6% <= 10.0%
Beta-to-SPY            0.12|0.14  PASS: Beta 0.12 between +/-0.30
Sectors                0.18|0.18  PASS: All sector exposures between +/-0.20
Style                  0.37|0.37  PASS: All style exposures between +/-0.40
Tradable                  96|100  PASS: Investment in QTradableStocksUS >= 95.0%
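Every check in check_constraints() follows the same percentile-band pattern: take a daily series, look at its min/2nd/98th/max percentiles, and compare them to the contest limits defined at the top of the cell. A minimal sketch of that pattern on a synthetic gross-leverage series (illustration only):

# Synthetic gross leverage, ~2 years of daily values.
np.random.seed(7)
fake_leverage = pd.Series(np.random.normal(1.0, 0.04, 504))

lev_2  = fake_leverage.quantile(0.02)
lev_98 = fake_leverage.quantile(0.98)
if LEVERAGE_2ND_MIN <= lev_2 and lev_98 <= LEVERAGE_98TH_MAX:
    print 'PASS: Leverage range %.2fx-%.2fx between %.1fx-%.1fx' % (
        lev_2, lev_98, LEVERAGE_2ND_MIN, LEVERAGE_98TH_MAX)
else:
    print 'FAIL: Leverage outside the %.1fx-%.1fx band' % (LEVERAGE_2ND_MIN, LEVERAGE_98TH_MAX)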
In [15]:
# Run this notebook to evaluate your algorithm. Note that the new contest requires all
#   constraints to pass before a submission is eligible to participate.
# Uncomment the line below for the full tear sheet:
#bt.create_full_tear_sheet()