Notebook
In [2]:
from quantopian.pipeline import Pipeline, CustomFactor
from quantopian.pipeline.data import EquityPricing, factset
from quantopian.pipeline.factors import Returns, SimpleMovingAverage
from quantopian.pipeline.domain import (
    AT_EQUITIES, # Austria
    AU_EQUITIES, # Australia
    BE_EQUITIES, # Belgium
    BR_EQUITIES, # Brazil
    CA_EQUITIES, # Canada
    CH_EQUITIES, # Switzerland
    CN_EQUITIES, # China
    DE_EQUITIES, # Germany
    DK_EQUITIES, # Denmark
    ES_EQUITIES, # Spain
    FI_EQUITIES, # Finland
    FR_EQUITIES, # France
    GB_EQUITIES, # Great Britain
    HK_EQUITIES, # Hong Kong
    IE_EQUITIES, # Ireland
    IN_EQUITIES, # India
    IT_EQUITIES, # Italy
    JP_EQUITIES, # Japan
    KR_EQUITIES, # South Korea
    NL_EQUITIES, # Netherlands
    NO_EQUITIES, # Norway
    NZ_EQUITIES, # New Zealand
    PT_EQUITIES, # Portugal
    SE_EQUITIES, # Sweden
    SG_EQUITIES, # Singapore
    US_EQUITIES, # United States
)
from quantopian.research import run_pipeline

import pandas as pd
import numpy as np

import time

import matplotlib.pyplot as plt
import seaborn as sns

import empyrical as ep
import alphalens as al
import pyfolio as pf
In [3]:
# Custom factor that gets the minimum volume traded over the last two weeks.
class MinVolume(CustomFactor):
    inputs=[EquityPricing.volume]
    window_length=10
    def compute(self, today, asset_ids, out, values):
        # Calculates the column-wise standard deviation, ignoring NaNs
        out[:] = np.min(values, axis=0)

# Create a volume and price filter that filters for stocks in the top 10%.
# We multiply by price to rule out penny stocks that trade in huge volume.
volume_min = MinVolume()
price = EquityPricing.close.latest
univ_filter = ((price * volume_min).percentile_between(90, 100, mask=(volume_min > 0)))
In [4]:
import numpy as np
import pandas as pd

from bisect import bisect_right
from itertools import combinations, product


def canonical_combination(items, max_length=None):
    for length in range(1, len(items) + 1):
        for subset in combinations(items, length):
            yield subset
        if length == max_length:
            break


def lift(arr):
    total = arr.sum(dtype=float)
    numerator = support(arr)
    denominator = (arr[0, :].sum() / total) * (arr[:, 0].sum() / total)
    return numerator / denominator


def support(arr):
    return arr[0][0] / arr.sum(dtype=float)


def confidence(arr):
    count = arr[0][0]
    return max(count / arr[0, :].sum(), count / arr[:, 0].sum())

class ContrastSetLearner:
    def __init__(self, frame, group_feature, num_parts=3, max_unique_reals=15,
                 sep='=>', max_rows=None, min_unique_objects=2,
                 max_real_bias=0.6):

        try:
            # test that the group feature exists as a column
            frame[group_feature]
        except KeyError:
            raise

        if num_parts < 1:
            num_parts = 1

        # if so-many rows are desired, select those-many rows
        if max_rows:
            frame = pd.DataFrame(frame.iloc[:max_rows])

        # retrieve discrete features, i.e. categorical and boolean, as object
        subset = frame.select_dtypes(['category', 'bool', 'object'])

        # append the feature to its attribute, making it attribute := value
        bad_cols = []
        for col in subset.columns:
            unique_objs = list(subset[col].unique())

            # remove all features which few unique items; low quality
            if len(unique_objs) < min_unique_objects:
                bad_cols.append(col)
                logging.debug("'{}' lacks objects: {}".format(col, unique_objs))
                continue

            frame[col] = col + sep + subset[col].astype(str)

        # retrieve continuous features, i.e. float and int, as number
        subset = frame.select_dtypes(['number'])
        subset = subset.fillna(0)

        # repeat the appending process above, but for real-values
        for col in subset.columns:
            series = subset[col]
            arr = list(series.sort_values().unique())
            max_bias = series.value_counts(normalize=True).max()

            # remove all features which have values with a high frequency bias
            if max_bias > max_real_bias:
                bad_cols.append(col)
                continue

            # if numeric feature has many unique values, partition into chunks
            if len(arr) > max_unique_reals:

                # if there are so-few unique places, only make 1 partition
                if len(arr) <= num_parts:
                    parts = np.array_split(arr, 1)  # make one partition
                else:
                    parts = np.array_split(arr, num_parts)  # what you'd want

                # partitions have (lower, upper) value; use lower to get index
                values = list(map(lambda x: (x[0], x[-1]), parts))
                lwr = list(map(lambda x: x[0], values))

                # determine which (lower, upper) range this value falls into
                series = series.apply(lambda x: values[bisect_right(lwr, x)-1])
                frame[col] = col + sep + series.astype(str)

            # if numeric feature has few unique values, append it like object
            else:
                frame[col] = col + sep + subset[col].astype(str)

        frame.drop(bad_cols, axis=1, inplace=True)

        metadata = {}
        for col in frame:

            # add all the states pointing to their features to the metadata
            states = list(frame[col].unique())
            for ix, state in enumerate(states):
                element = {state: {'pos': ix, 'feature': col}}
                metadata.setdefault('states', {}).update(element)

            # add all the features pointing to their states to the metadata
            metadata.setdefault('features', {}).update({col: states})
        metadata.update({'group_feature': group_feature, 'shape': frame.shape})
        self.metadata = metadata

        try:
            # get the contrast group, remove from frame, and make items as list
            group_values = pd.Series(frame[group_feature])
            frame.drop(group_feature, axis=1, inplace=True)

            # merge all features into series; throw exception if no items exist
            items = pd.Series(frame.apply(lambda x: tuple(x), 1), name='items')
            if all(items.isnull()):
                raise ValueError("No items; revise parameter values.")

            # merge group values and items as DataFrame, and count frequency
            dummy_frame = pd.concat([group_values, items], axis=1)
            counts = dummy_frame.groupby(list(dummy_frame.columns)).size()

            # data is list containing the items, its group, and count
            self.data = counts.reset_index(name='count').to_dict('records')
            self.group = group_feature  # feature to contrast, aka. column name
            self.counts = {}
        except ValueError as e:
            print(e)

    def learn(self, max_length=2, max_records=None, max_contingency_matrix=None):
        if max_records:
            self.data = self.data[:max_records]

        # get number of states for the feature
        num_states = len(self.metadata['features'][self.group])

        # we intend, in this block, to compute counts for the rule across groups
        stop_learning = False
        for i, rec in enumerate(self.data):
            state, items, count = rec[self.group], rec['items'],rec['count']
            for rule in canonical_combination(items, max_length):
                if rule not in self.counts:
                    self.counts[rule] = np.zeros((2, num_states))
                    if len(self.counts) == max_contingency_matrix:
                        stop_learning = True

                # update the rule (row 0) count given the column index of state
                contingency_matrix = self.counts[rule]

                # get columnar position of the group state and update matrix
                pos = self.metadata['states'][state]['pos']
                contingency_matrix[0][pos] += count
            if stop_learning:
                break

        # compute the counts for the not-rule
        for i, rule in enumerate(self.counts):
            # given rule, compute all not-rules possibilities
            rule_negations = self.get_rule_negations(rule)

            # for each not-rule, fetch its counts and add to not-rule (row 1)
            for rule_negated in rule_negations:
                if rule_negated in self.counts:
                    rule_negated_count = self.counts[rule_negated][0]
                    self.counts[rule][1] += rule_negated_count

        # serves as an upper-bound for how many rules there could be
        return len(self.counts)

    def get_rule_negations(self, rule):
        if not isinstance(rule, tuple) or not len(rule) > 0:
            msg = '`rule` must be tuple; see `self.counts` keys for examples.'
            raise ValueError(msg)

        # stores all not-components, i.e. [size = S, size = L], [height = tall]
        iterables = []

        # for each rule component, fetch its feature, and get all other states
        for component in rule:

            # only rules in the metadata, under states key, are accepted
            if component not in self.metadata['states']:
                raise KeyError(component + " is an invalid rule; see metadata.")

            # fetch the feature given the desired state, or component
            feature = self.metadata['states'][component]['feature']
            all_components = list(self.metadata['features'][feature])

            # remove the rule component, leaving only not-components
            all_components.remove(component)
            iterables.append(all_components)

        # compute negation-combinations
        negations = list(product(*iterables))
        return negations

    def score(self, min_support=0.1, min_support_count=10, min_difference=2,
              min_lift=2.0, min_confidence=0.75):
        # read the metadata and map group-states to their column number
        states = self.metadata['features'][self.group]
        state_positions = {self.metadata['states'][s]['pos']: s for s in states}

        # for storing all the statistically significant rules
        data = []

        # iterate over all rules and their contingency matrix
        for i, rule in enumerate(self.counts):
            contingency_matrix = self.counts[rule]

            # for each group (column), extract-out all other columns
            for col_num in range(np.shape(contingency_matrix)[1]):
                this_column = contingency_matrix[:, col_num][:, np.newaxis]
                not_columns = np.delete(contingency_matrix, col_num, axis=1)

                # compute the row-wise sum for the not-columns
                not_column_sum = not_columns.sum(axis=1)[:, np.newaxis]

                # join current and not-columns to give 2 x 2 contingency matrix
                two_by_two = np.hstack((this_column, not_column_sum))

                # skip if rule difference across groups is not large
                if abs(np.subtract(*two_by_two[0])) <= min_difference:
                    continue

                # if the rule, in the group, is infrequent, continue on
                if two_by_two[0][0] <= min_support_count:
                    continue

                # fetch the actual statistical metric outputs
                support_out = support(two_by_two)
                lift_out = lift(two_by_two)
                conf_out = confidence(two_by_two)

                # assert the statistical outputs exceed the cutoffs
                conditions = [support_out > min_support,
                              conf_out > min_confidence,
                              lift_out > min_lift]

                # append good rules, and its group, to what will be a DataFrame
                if all(conditions):
                    group = state_positions[col_num]
                    row = {'rule': rule, 'group': group, 'lift': lift_out}
                    data.append(row)

        # save the resulting rules to a DataFrame and sort by lift
        frame = pd.DataFrame(data)
        if len(frame) > 0:
            frame.sort_values('lift', ascending=False, inplace=True)
        return frame
In [26]:
import pandas as pd
import numpy as np
import scipy as sp
from sklearn.cross_decomposition import PLSRegression
from quantopian.pipeline.data.factset import RBICSFocus
from sklearn.qda import QDA
from quantopian.pipeline.data.factset import Fundamentals as ft_fun
from quantopian.pipeline.data.factset.estimates import PeriodicConsensus
from quantopian.pipeline.data.factset.estimates import Actuals
from collections import OrderedDict
import statsmodels.api as smapi

price_sales_qf = (ft_fun.sales_qf.latest / EquityPricing.close.latest)
price_salef_af = (ft_fun.sales_ltm.latest / EquityPricing.close.latest)


price_qf1_sales = ( PeriodicConsensus.slice('SALES', 'qf', 1).mean.latest / EquityPricing.close.latest )

price_fy1_sales = ( PeriodicConsensus.slice('SALES', 'af', 1).mean.latest / EquityPricing.close.latest )
price_fy2_sales = ( PeriodicConsensus.slice('SALES', 'af', 2).mean.latest / EquityPricing.close.latest )

price_eps_qf = ( Actuals.slice('EPS', 'qf', 0).actual_value.latest / EquityPricing.close.latest )
price_eps_af = ( Actuals.slice('EPS', 'af', 0).actual_value.latest / EquityPricing.close.latest )
price_qf1_eps = ( PeriodicConsensus.slice('EPS', 'qf', 1).mean.latest / EquityPricing.close.latest )
price_fy1_eps = ( PeriodicConsensus.slice('EPS', 'af', 1).mean.latest / EquityPricing.close.latest )
price_fy2_eps = ( PeriodicConsensus.slice('EPS', 'af', 2).mean.latest / EquityPricing.close.latest )
price_fcf_qf = ( ft_fun.free_cf_fcfe_qf.latest / EquityPricing.close.latest )
price_fcf_af = ( ft_fun.free_cf_fcfe_ltm.latest / EquityPricing.close.latest )

price_ocf_qf = ( ft_fun.oper_cf_qf.latest / EquityPricing.close.latest )
price_ocf_af = ( ft_fun.oper_cf_ltm.latest / EquityPricing.close.latest )

cf_total_assets_af = (ft_fun.oper_cf_ltm.latest / ft_fun.assets.latest)
cf_total_assets_qf = (ft_fun.oper_cf_qf.latest / ft_fun.assets.latest)
div_yield_af = ft_fun.div_yld_af.latest
div_yield_cf = ft_fun.div_yld_cf.latest
div_yield_sf = ft_fun.div_yld_saf.latest
ev_ebitda_af = ft_fun.entrpr_val_ebitda_oper_af.latest
ev_ebitda_qf = ft_fun.entrpr_val_ebitda_oper_qf.latest
ev_ebitda_saf = ft_fun.entrpr_val_ebitda_oper_saf.latest
price_ebitda = ( ft_fun.ebitda_oper_ltm.latest / EquityPricing.close.latest )

sales_gr_qf = ft_fun.sales_gr_qf.latest
sales_gr_af = ft_fun.sales_gr_af.latest
eps_gr_qf = ft_fun.eps_basic_gr_qf.latest
eps_gr_af = ft_fun.eps_basic_gr_ltm.latest
assets_gr_qf = ft_fun.assets_gr_qf.latest
assets_gr_af = ft_fun.assets_gr_af.latest
bps_gr_qf = ft_fun.bps_gr_qf.latest
bps_gr_af = ft_fun.bps_gr_af.latest
cf_gr_qf = ft_fun.cf_ps_gr_qf.latest
cf_gr_af = ft_fun.cf_ps_gr_af.latest
asset_turnover_qf = ft_fun.asset_turn_qf.latest
asset_turnover_af = ft_fun.asset_turn_af.latest
curr_ratio_qf = ft_fun.curr_ratio_qf.latest
curr_ratio_af = ft_fun.curr_ratio_af.latest
asset_equity_qf = ft_fun.assets_com_eq_qf.latest
asset_equity_af = ft_fun.assets_com_eq_af.latest
interest_coverage_qf = ft_fun.ebit_oper_int_covg_qf.latest
interest_coverage_qf = ft_fun.ebit_oper_int_covg_af.latest
debt_to_assets_qf = ft_fun.debt_assets_qf.latest
debt_to_assets_af = ft_fun.debt_assets_af.latest
debt_equity_qf = ft_fun.debt_com_eq_qf.latest
debt_equity_qf = ft_fun.debt_com_eq_af.latest
z_score_qf = ft_fun.zscore_qf.latest
z_score_af = ft_fun.zscore_af.latest
wc_sales_qf = ft_fun.sales_wkcap_qf.latest
wc_sales_af = ft_fun.sales_wkcap_af.latest
capex_assets_qf = ft_fun.capex_assets_qf.latest
capex_assets_af = ft_fun.capex_assets_af.latest
capex_sales_af = ft_fun.capex_sales_ltm.latest


features = {
    'price_sales_qf' : price_sales_qf,
    'price_salef_af' : price_salef_af,
    'price_qf1_sales' : price_qf1_sales, 
    'price_fy1_sales ' :price_fy1_sales ,
    'price_fy2_sales ' :price_fy2_sales ,
    'price_eps_qf ' :price_eps_qf ,
    'price_eps_af ' :price_eps_af ,
    'price_qf1_eps ' :price_qf1_eps ,
    'price_fy1_eps ' :price_fy1_eps ,
    'price_fy2_eps ' :price_fy2_eps ,
    'price_fcf_qf ' :price_fcf_qf ,
    'price_fcf_af ' :price_fcf_af ,
    'price_ocf_qf ' :price_ocf_qf ,
    'price_ocf_af ' :price_ocf_af ,
    'cf_total_assets_af ' :cf_total_assets_af ,
    'cf_total_assets_qf ' :cf_total_assets_qf ,
    'div_yield_af ' :div_yield_af ,
    'div_yield_cf ' :div_yield_cf ,
    'div_yield_sf ' :div_yield_sf ,
    'ev_ebitda_af ' :ev_ebitda_af ,
    'ev_ebitda_qf ' :ev_ebitda_qf ,
    'ev_ebitda_saf ' :ev_ebitda_saf ,
    'price_ebitda ' :price_ebitda ,
    'sales_gr_qf ' :sales_gr_qf ,
    'sales_gr_af ' :sales_gr_af ,
    'eps_gr_qf ' :eps_gr_qf ,
    'eps_gr_af ' :eps_gr_af ,
    'assets_gr_qf ' :assets_gr_qf ,
    'assets_gr_af ' :assets_gr_af ,
    'bps_gr_qf ' :bps_gr_qf ,
    'bps_gr_af ' :bps_gr_af ,
    'cf_gr_qf ' :cf_gr_qf ,
    'cf_gr_af ' :cf_gr_af ,
    'asset_turnover_qf ' :asset_turnover_qf ,
    'asset_turnover_af ' :asset_turnover_af ,
    'curr_ratio_qf ' :curr_ratio_qf ,
    'curr_ratio_af ' :curr_ratio_af ,
    'asset_equity_qf ' :asset_equity_qf ,
    'asset_equity_af ' :asset_equity_af ,
    'interest_coverage_qf ' :interest_coverage_qf ,
    'interest_coverage_qf ' :interest_coverage_qf ,
    'debt_to_assets_qf ' :debt_to_assets_qf ,
    'debt_to_assets_af ' :debt_to_assets_af ,
    'debt_equity_qf ' :debt_equity_qf ,
    'debt_equity_qf ' :debt_equity_qf ,
    'z_score_qf ' :z_score_qf ,
    'z_score_af ' :z_score_af ,
    'wc_sales_qf ' :wc_sales_qf ,
    'wc_sales_af ' :wc_sales_af ,
    'capex_assets_qf' : capex_assets_qf ,
    'capex_assets_af' : capex_assets_af ,
    'capex_sales_af' : capex_sales_af,
}


def shift_mask_data(features,
                    l1,
                    n_forward_days):
    features = features[:-n_forward_days]
    l1 = l1[n_forward_days:]
    f1 = features.reshape(-1, features.shape[-1])
    
    return f1, l1.ravel()
 

class ML(CustomFactor):
    def __init__(self, *args, **kwargs):
        CustomFactor.__init__(self, *args, **kwargs)
        
    def _compute(self, *args, **kwargs):
        ret = CustomFactor._compute(self, *args, **kwargs)
        return ret
 
    def _train_model(self, today, skewOne, inputs):
        f1,l1 = shift_mask_data(
            np.dstack(inputs),
            skewOne,
            n_forward_days=26,
        )
        
        f1[~np.isfinite(f1)] = 0.
        f1[~np.isfinite(l1)] = 0.
        l1[~np.isfinite(l1)] = 0.
        
        cols = features.keys()
        df = pd.DataFrame(f1, columns=cols)
        df["Returns"] = pd.qcut(l1, 3, labels=["low", "medium", "high"])
        learner = ContrastSetLearner(df, "Returns")
        learner.learn(max_length=3)
        return learner.score(min_support=0, min_support_count=10, min_difference=0,
              min_lift=2.0, min_confidence=0)
 
    def _maybe_train_model(self, today, skewOne, inputs):
        print(today)
        return self._train_model(today, skewOne, inputs)

 
    def compute(self, today, assets, out, skewOne, *inputs):
        df = self._maybe_train_model(today, skewOne, inputs)
        df = df.drop('lift', axis=1)
        df = df.groupby('group').head(5)
        for row in range(df.shape[0]):
            for col in df.columns:
                print(df[col].values[row])
        out[:] = 1
 
def make_ml_pipeline(universe):
    pipeline_columns = OrderedDict()
    economy_focus = RBICSFocus.l1_name.latest
    returns_25d = Returns(window_length=25)
    pipeline_columns['SkewOne'] = returns_25d
    pipeline_columns.update({
        k: v.rank(mask=universe, groupby=economy_focus) for k, v in features.items()
    })
 
    pipeline_columns['ML'] = ML(
        inputs=pipeline_columns.values(),
        window_length=27,
        mask=universe,
    )
 
    return pipeline_columns['ML']

economy_focus = RBICSFocus.l1_name.latest
ML = make_ml_pipeline(
        univ_filter,
    ).zscore(groupby=economy_focus)
In [28]:
factor_pipe = Pipeline(
        {'factor':ML},
        domain=JP_EQUITIES,
    )

factor_results = run_pipeline(factor_pipe, '2016-06-01', '2016-06-02', chunksize=250)

2016-06-01 00:00:00+00:00
Returns=>low
('price_fcf_qf =>(19.0, 36.0)', 'price_qf1_eps =>(19.0, 36.0)', 'cf_total_assets_af =>(18.0, 35.0)')
Returns=>high
('wc_sales_qf =>(0.0, 18.0)', 'price_eps_af =>(18.0, 35.0)', 'z_score_qf =>(19.0, 36.0)')
Returns=>high
('price_eps_af =>(18.0, 35.0)', 'wc_sales_af =>(0.0, 17.0)', 'capex_assets_af=>(19.0, 36.0)')
Returns=>high
('wc_sales_qf =>(0.0, 18.0)', 'debt_to_assets_af =>(19.0, 36.0)', 'eps_gr_qf =>(0.0, 17.0)')
Returns=>low
('price_qf1_eps =>(19.0, 36.0)', 'price_ocf_qf =>(0.0, 17.0)', 'price_ebitda =>(0.0, 17.0)')
Returns=>low
('price_qf1_eps =>(19.0, 36.0)', 'debt_to_assets_qf =>(18.0, 35.0)', 'price_fy2_sales =>(19.0, 36.0)')
Returns=>high
('price_qf1_eps =>(19.0, 36.0)', 'capex_assets_af=>(19.0, 36.0)', 'eps_gr_qf =>(0.0, 17.0)')
Returns=>high
('wc_sales_qf =>(0.0, 18.0)', 'sales_gr_af =>(18.0, 35.0)', 'z_score_qf =>(19.0, 36.0)')
Returns=>low
('price_qf1_eps =>(19.0, 36.0)', 'bps_gr_qf =>(18.0, 35.0)', 'price_fy2_sales =>(19.0, 36.0)')
Returns=>low
('price_fcf_af =>(19.0, 36.0)', 'price_qf1_eps =>(19.0, 36.0)', 'price_fy2_sales =>(19.0, 36.0)')
Returns=>medium
('sales_gr_af =>(0.0, 17.0)', 'assets_gr_af =>(19.0, 36.0)', 'eps_gr_qf =>(18.0, 35.0)')
Returns=>medium
('ev_ebitda_qf =>(18.0, 34.0)', 'curr_ratio_af =>(0.0, 17.0)', 'eps_gr_qf =>(18.0, 35.0)')
2016-06-02 00:00:00+00:00
Returns=>low
('price_salef_af=>(19.0, 36.0)', 'assets_gr_af =>(37.0, 54.0)')
Returns=>low
('price_salef_af=>(19.0, 36.0)', 'eps_gr_qf =>(36.0, 54.0)')
Returns=>high
('price_eps_af =>(18.0, 35.0)', 'price_qf1_sales=>(18.0, 34.0)', 'eps_gr_qf =>(0.0, 17.0)')
Returns=>low
('bps_gr_qf =>(18.0, 35.0)', 'div_yield_af =>(18.0, 35.0)', 'price_fy2_sales =>(19.0, 36.0)')
Returns=>low
('price_qf1_eps =>(19.0, 36.0)', 'debt_to_assets_qf =>(18.0, 35.0)', 'price_fy2_sales =>(19.0, 36.0)')
Returns=>high
('price_ocf_af =>(0.0, 17.0)', 'eps_gr_qf =>(0.0, 17.0)', 'cf_total_assets_af =>(19.0, 36.0)')
Returns=>high
('price_ocf_af =>(0.0, 17.0)', 'cf_total_assets_qf =>(18.0, 35.0)', 'eps_gr_qf =>(0.0, 17.0)')
Returns=>low
('bps_gr_qf =>(18.0, 35.0)', 'cf_gr_qf =>(37.0, 54.0)')
Returns=>high
('ev_ebitda_qf =>(18.0, 34.0)', 'capex_assets_af=>(19.0, 36.0)', 'eps_gr_qf =>(0.0, 17.0)')
Returns=>high
('wc_sales_qf =>(0.0, 18.0)', 'price_qf1_sales=>(18.0, 34.0)', 'capex_assets_af=>(19.0, 36.0)')
Returns=>medium
('bps_gr_qf =>(36.0, 54.0)', 'cf_total_assets_af =>(37.0, 54.0)')
Returns=>medium
('bps_gr_qf =>(36.0, 54.0)', 'asset_turnover_qf =>(37.0, 54.0)')
Returns=>medium
('ev_ebitda_qf =>(18.0, 34.0)', 'assets_gr_af =>(19.0, 36.0)', 'eps_gr_qf =>(18.0, 35.0)')
Returns=>medium
('price_fy1_sales =>(37.0, 54.0)', 'bps_gr_qf =>(36.0, 54.0)', 'asset_turnover_qf =>(37.0, 54.0)')
Returns=>medium
('wc_sales_qf =>(19.0, 36.0)', 'ev_ebitda_qf =>(18.0, 34.0)', 'interest_coverage_qf =>(0.0, 18.0)')
Pipeline Execution Time: 3 Minutes, 41.38 Seconds