When I use: history(2, '1d', 'high')
all of a sudden I get a bizarre error: cannot concatenate 'str' and 'NoneType' objects
If I only request 1 day, history(1, '1d', 'high'), then it works. How can I prevent this?
Is this because some security isn't listed earlier or something?
I tried to find the stocks' start dates, but none of them seem to be in 2010: print {s.symbol:s.start_date for s in data.keys()}
My code picks the securities like this, with a start date of 01/01/2010:
def before_trading_start(context):
    """
    Called before the start of each trading day (before handle_data).

    Queries the fundamentals database for the 100 largest primary US share
    classes, stores the result on the context, and updates the trading
    universe with the selected securities.

    Parameters
    ----------
    context : object
        Quantopian algorithm context; ``context.fundamental_df`` is set
        here for use by later callbacks.
    """
    #: Reference fundamentals in a shorter variable
    f = fundamentals
    #: SQLAlchemy-style query against the fundamentals database
    fundamental_df = get_fundamentals(
        query(
            f.valuation.market_cap,
            f.valuation_ratios.pe_ratio,
            f.asset_classification.morningstar_sector_code,
            f.operation_ratios.roa,
            f.cash_flow_statement.operating_cash_flow
        )
        # No ADR or pink-sheet listings; only primary share classes
        .filter(fundamentals.share_class_reference.is_depositary_receipt == False)
        .filter(fundamentals.share_class_reference.is_primary_share == True)
        .filter(fundamentals.company_reference.primary_exchange_id != "OTCPK")
        # .filter(fundamentals.company_reference.country_id == "USA")
        # Check for data sanity (i.e. avoid division by zero)
        .filter(fundamentals.valuation.market_cap > 10000)
        .filter(fundamentals.valuation.shares_outstanding > 0)
        # .filter(fundamentals.balance_sheet.invested_capital > 0)
        .filter(fundamentals.balance_sheet.common_stock_equity > 0)
        # .filter(fundamentals.balance_sheet.invested_capital != fundamentals.balance_sheet.cash_and_cash_equivalents)
        .order_by(fundamentals.valuation.market_cap.desc())
        .limit(100)
    )

    # The query can return multiple share classes of the same company
    # (symbols such as "FOO_A" alongside "FOO"); the disabled check below
    # detects these duplicates. Kept behind `if 0:` to preserve the
    # original behavior, but the dead code's bugs are fixed so it works
    # when enabled: the original used `is '_'` (identity, not equality)
    # and raised a bare string, which is a TypeError.
    secs = np.array([s.symbol for s in fundamental_df.columns.values])
    if 0:
        for i, s in enumerate(secs):
            # NOTE(review): assumes symbols are at least 2 chars — confirm
            if s[-2] == '_' and s[:-2] in secs:
                raise ValueError('drop duplicate class shares')

    #: Set our fundamentals into a context variable
    context.fundamental_df = fundamental_df
    #: Update our universe with the selected securities
    update_universe(fundamental_df.columns.values)