https://www.quantopian.com/help/fundamentals
Morningstar's financial health grade is a convenient tool for incorporating distance to default into trading strategies. However, it is not easily accessible outside the Quantopian platform. Motivated by this, I tried to replicate the calculation of the financial health grade.
The method that uses standard estimation of equity volatility gives 43% accuracy for grade A, and 67% accuracy for grades A and B (on 2018-08-01 data).
Interestingly, even grade D and F stocks can have a low probability of default (<0.01). That may be due to a narrower range in the probability of default, since Morningstar ranks the distance to default instead of the probability of default.
I did not include dividend in the calculation. You may try to include that and see the difference.
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import CustomFactor
from quantopian.pipeline.data import Fundamentals
from quantopian.pipeline.filters import QTradableStocksUS
from quantopian.pipeline.data.builtin import USEquityPricing
import numpy as np
import pandas as pd
import math
from scipy.stats import norm
from scipy.optimize import minimize
https://corporate.morningstar.com/us/documents/Indexes/Morningstar+Indexes+DtD+Methodology.pdf
class DD_optimize(CustomFactor):
    """Merton distance-to-default, using an exponentially weighted (halflife
    0.97) standard deviation of daily log returns as the equity-volatility
    estimate.

    Inputs (trailing 252 trading days per asset):
      - market cap (used as the market value of equity, E)
      - total liabilities net of minority interest (used as debt, D)
      - close price (used to estimate equity volatility)

    For each asset, solves the two Merton equations for asset value and asset
    volatility via Nelder-Mead on a summed-squared-residual objective, then
    writes the distance to default into ``out``.
    """
    inputs = [Fundamentals.market_cap, Fundamentals.total_liabilities_net_minority_interest, USEquityPricing.close]
    window_length = 252

    def compute(self, today, assets, out, tot_equity, tot_liabilities, close):
        # Arrays arrive as (days, assets); transpose so each row is one asset.
        horizon = 1.0       # T: one-year maturity
        risk_free = 0.03    # flat risk-free rate assumption
        sqrt_t = np.sqrt(horizon)
        distances = []
        for equity_col, debt_col, price_col in zip(tot_equity.T, tot_liabilities.T, close.T):
            # EWMA std of daily log returns, annualized with sqrt(252).
            log_ret = np.log(1 + pd.DataFrame(price_col).pct_change())
            sigma_e = log_ret.ewm(halflife=0.97).std().iloc[-1][0] * math.sqrt(252)
            equity = equity_col[-1]
            debt = debt_col[-1]

            def objective(params):
                # Residuals of the two Merton identities:
                #   E = V_a * N(d1) - D * exp(-r T) * N(d2)
                #   sigma_e * E = N(d1) * V_a * sigma_a
                asset_val, asset_sig = params
                d1 = (np.log(asset_val / debt) + (risk_free + 0.5 * asset_sig ** 2) * horizon) / (asset_sig * sqrt_t)
                d2 = d1 - asset_sig * sqrt_t
                resid_value = equity - (asset_val * norm.cdf(d1) - debt * np.exp(-risk_free * horizon) * norm.cdf(d2))
                resid_vol = sigma_e * equity - norm.cdf(d1) * asset_val * asset_sig
                return resid_value ** 2 + resid_vol ** 2

            # Start the search from (E, sigma_e) — a standard Merton seed.
            fit = minimize(objective, [equity, sigma_e], method='nelder-mead')
            asset_value, asset_sigma = fit.x
            # Distance to default: (ln(V_a/D) + (r - sigma_a^2/2) T) / (sigma_a sqrt(T))
            numer = np.log(asset_value / debt) + (risk_free - (asset_sigma ** 2) / 2) * horizon
            distances.append(numer / (asset_sigma * math.sqrt(horizon)))
        out[:] = distances
https://gladmainnew.morningstar.com/directhelp/Methodology_StockFinancialHealth.pdf
class DD_optimize_2(CustomFactor):
    """Merton distance-to-default, using the plain (equal-weighted) standard
    deviation of daily log returns as the equity-volatility estimate.

    Same structure as ``DD_optimize`` but without exponential weighting:
    solve the Merton system for asset value and asset volatility per asset
    (Nelder-Mead on summed squared residuals), then emit distance to default.
    """
    inputs = [Fundamentals.market_cap, Fundamentals.total_liabilities_net_minority_interest, USEquityPricing.close]
    window_length = 252

    def compute(self, today, assets, out, tot_equity, tot_liabilities, close):
        # Inputs are shaped (days, assets); iterate per-asset columns.
        maturity = 1.0     # T in years
        rf = 0.03          # assumed risk-free rate
        dd_values = []
        for eq_series, debt_series, px_series in zip(tot_equity.T, tot_liabilities.T, close.T):
            # Annualized standard deviation of daily log returns.
            returns = pd.DataFrame(px_series).pct_change()
            sigma_e = np.log(1 + returns).std()[0] * math.sqrt(252)
            e_last = eq_series[-1]
            d_last = debt_series[-1]

            def merton_residuals(guess):
                # Squared residuals of the Merton call-option identities.
                va, sa = guess
                d1 = (np.log(va / d_last) + (rf + 0.5 * sa ** 2) * maturity) / (sa * np.sqrt(maturity))
                d2 = d1 - sa * np.sqrt(maturity)
                r1 = e_last - (va * norm.cdf(d1) - d_last * np.exp(-rf * maturity) * norm.cdf(d2))
                r2 = sigma_e * e_last - norm.cdf(d1) * va * sa
                return r1 ** 2 + r2 ** 2

            solution = minimize(merton_residuals, [e_last, sigma_e], method='nelder-mead')
            va_hat, sa_hat = solution.x
            # DD = (ln(V_a/D) + (r - sigma_a^2/2) T) / (sigma_a sqrt(T))
            top = np.log(va_hat / d_last) + (rf - (sa_hat ** 2) / 2) * maturity
            dd_values.append(top / (sa_hat * math.sqrt(maturity)))
        out[:] = dd_values
# Build a pipeline restricted to the tradable-US universe and attach both
# distance-to-default factors plus Morningstar's own grade for comparison.
pipe = Pipeline()
screen = QTradableStocksUS()
pipe.add(DD_optimize(mask=screen), 'DD_weightedAverage')
pipe.add(DD_optimize_2(mask=screen), 'DD_standardSD')
# Morningstar financial health grade (A-F), used as the benchmark label.
PD = Fundamentals.financial_health_grade.latest
pipe.add(PD, 'PD')
pipe.set_screen(screen)
pipe.show_graph()
# Run for a single day and drop assets with missing factor values.
results = run_pipeline(pipe,'2018-8-1', '2018-8-1')
results.dropna(inplace=True)
results.head()
# Convert each distance to default into a probability of default via the
# standard normal CDF: PD = N(-DD).
results['PD standardSD'] = norm.cdf(-results['DD_standardSD'])
results['PD weightedAverage'] = norm.cdf(-results['DD_weightedAverage'])
results.head()
Filter by the 0.9 quantile and group by grade A to F.
Check what percentage of stocks are classified similarly to the Morningstar method.
# Per-grade counts of stocks whose distance to default sits in the top decile,
# for each volatility-estimation method.
count_WA = results[results['DD_weightedAverage'] >= results['DD_weightedAverage'].quantile(0.9)].groupby('PD').count()
count_SD = results[results['DD_standardSD'] >= results['DD_standardSD'].quantile(0.9)].groupby('PD').count()
# Total stocks per grade (denominator for the agreement rates below).
count_PD = results.groupby('PD').count()
# Fraction of each Morningstar grade captured by each method's top decile.
count_WA.div(count_PD)['DD_weightedAverage']
count_SD.div(count_PD)['DD_standardSD']
Filter by the 0.7 quantile and group by grade A to F.
Check what percentage of stocks are classified similarly to the Morningstar method.
# Repeat the agreement check with a looser cutoff: top 30% by distance to
# default (reuses count_PD computed above as the per-grade denominator).
count_WA = results[results['DD_weightedAverage'] >= results['DD_weightedAverage'].quantile(0.7)].groupby('PD').count()
count_SD = results[results['DD_standardSD'] >= results['DD_standardSD'].quantile(0.7)].groupby('PD').count()
count_WA.div(count_PD)['DD_weightedAverage']
count_SD.div(count_PD)['DD_standardSD']
# Fraction of each grade with an implied probability of default below 1%.
PD_WA = results[results['PD weightedAverage'] <=0.01]
PD_WA.groupby('PD').count().div(count_PD)['PD weightedAverage']
PD_SD = results[results['PD standardSD'] <=0.01]
PD_SD.groupby('PD').count().div(count_PD)['PD standardSD']