Calcul 20240101-20250514 1082$ 243.819$ => 4,452
This commit is contained in:
@@ -349,8 +349,11 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
# =========================================================================
|
||||
# Parameters hyperopt
|
||||
|
||||
buy_mid_smooth_3_deriv1 = DecimalParameter(-0.1, 0.1, decimals=2, default=-0.06, space='buy')
|
||||
buy_mid_smooth_24_deriv1 = DecimalParameter(-0.6, 0, decimals=2, default=-0.03, space='buy')
|
||||
# buy_mid_smooth_3_deriv1 = DecimalParameter(-0.1, 0.1, decimals=2, default=-0.06, space='buy')
|
||||
# buy_mid_smooth_24_deriv1 = DecimalParameter(-0.6, 0, decimals=2, default=-0.03, space='buy')
|
||||
buy_horizon_predict_1h = IntParameter(1, 12, default=2, space='buy')
|
||||
buy_level_predict_1h = IntParameter(2, 5, default=4, space='buy')
|
||||
|
||||
|
||||
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float, time_in_force: str,
|
||||
current_time: datetime, entry_tag: Optional[str], **kwargs) -> bool:
|
||||
@@ -733,6 +736,8 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
# informative['deriv1'] = 100 * informative['deriv1'] / informative['mid']
|
||||
# informative['deriv2'] = 1000 * informative['deriv2'] / informative['mid']
|
||||
|
||||
# poly_func, x_future, y_future, count = self.polynomial_forecast(informative['sma5_deriv1_1d'], window=24, degree=4)
|
||||
|
||||
dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1d", ffill=True)
|
||||
|
||||
dataframe['last_price'] = dataframe['close']
|
||||
@@ -815,15 +820,16 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
# dataframe['percent_with_previous_day'] = 100 * (dataframe['close'] - dataframe['close_1d']) / dataframe['close']
|
||||
# dataframe['percent_with_max_hour'] = 100 * (dataframe['close'] - dataframe['max12_1h']) / dataframe['close']
|
||||
#
|
||||
# horizon_h = 24 * 5
|
||||
# dataframe['futur_percent_1h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-12) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
|
||||
# dataframe['futur_percent_3h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-36) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
|
||||
# dataframe['futur_percent_5h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-60) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
|
||||
# dataframe['futur_percent_12h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-144) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
|
||||
|
||||
#
|
||||
# dataframe['futur_percent_1d'] = 100 * (dataframe['close'].shift(-1) - dataframe['close']) / dataframe['close']
|
||||
# dataframe['futur_percent_3d'] = 100 * (dataframe['close'].shift(-3) - dataframe['close']) / dataframe['close']
|
||||
|
||||
# self.calculateProbabilite2Index(dataframe, ['futur_percent_12h'], 'mid_smooth_deriv1_1d', 'sma24_deriv1_1h')
|
||||
#
|
||||
# self.calculateProbabilite2Index(dataframe, ['futur_percent_1d'], 'sma24_deriv1_1h', 'sma5_1d')
|
||||
|
||||
return dataframe
|
||||
|
||||
@@ -915,65 +921,65 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
|
||||
return dataframe
|
||||
|
||||
# def calculateProbabilite2Index(self, df, futur_cols, indic_1, indic_2):
|
||||
# # # Définition des tranches pour les dérivées
|
||||
# # bins_deriv = [-np.inf, -0.05, -0.01, 0.01, 0.05, np.inf]
|
||||
# # labels = ['forte baisse', 'légère baisse', 'neutre', 'légère hausse', 'forte hausse']
|
||||
# #
|
||||
# # # Ajout des colonnes bin (catégorisation)
|
||||
# # df[f"{indic_1}_bin"] = pd.cut(df['mid_smooth_1h_deriv1'], bins=bins_deriv, labels=labels)
|
||||
# # df[f"{indic_2}_bin"] = pd.cut(df['mid_smooth_deriv1_1d'], bins=bins_deriv, labels=labels)
|
||||
# #
|
||||
# # # Colonnes de prix futur à analyser
|
||||
# # futur_cols = ['futur_percent_1h', 'futur_percent_2h', 'futur_percent_3h', 'futur_percent_4h', 'futur_percent_5h']
|
||||
# #
|
||||
# # # Calcul des moyennes et des effectifs
|
||||
# # grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"])[futur_cols].agg(['mean', 'count'])
|
||||
# #
|
||||
# # pd.set_option('display.width', 200) # largeur max affichage
|
||||
# # pd.set_option('display.max_columns', None)
|
||||
# pd.set_option('display.max_columns', None)
|
||||
# pd.set_option('display.width', 300) # largeur max affichage
|
||||
#
|
||||
# # nettoyage
|
||||
# # series = df[f"{indic_2}"].dropna()
|
||||
# # unique_vals = df[f"{indic_2}"].nunique()
|
||||
# # print(unique_vals)
|
||||
# # print(df[f"{indic_2}"])
|
||||
# n = len(self.labels)
|
||||
#
|
||||
# df[f"{indic_1}_bin"], bins_1h = pd.qcut(df[f"{indic_1}"], q=n, labels=self.labels, retbins=True,
|
||||
# duplicates='drop')
|
||||
# df[f"{indic_2}_bin"], bins_1d = pd.qcut(df[f"{indic_2}"], q=n, labels=self.labels, retbins=True,
|
||||
# duplicates='drop')
|
||||
# # Affichage formaté pour code Python
|
||||
# print(f"Bornes des quantiles pour {indic_1} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]")
|
||||
# print(f"Bornes des quantiles pour {indic_2} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]")
|
||||
# # Agrégation
|
||||
# grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[futur_cols].agg(['mean', 'count'])
|
||||
# # Affichage
|
||||
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
|
||||
# print(grouped.round(4))
|
||||
# # Ajout des probabilités de hausse
|
||||
# for col in futur_cols:
|
||||
# df[f"{col}_is_up"] = df[col] > 0
|
||||
#
|
||||
# # Calcul de la proba de hausse
|
||||
# proba_up = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[f"{col}_is_up"].mean().unstack()
|
||||
#
|
||||
# print(f"\nProbabilité de hausse pour {col} (en %):")
|
||||
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
|
||||
# print((proba_up * 100).round(1))
|
||||
#
|
||||
# # Affichage formaté des valeurs comme tableau Python
|
||||
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
|
||||
# df_formatted = (proba_up * 100).round(1)
|
||||
#
|
||||
# print("data = {")
|
||||
# for index, row in df_formatted.iterrows():
|
||||
# row_values = ", ".join([f"{val:.1f}" for val in row])
|
||||
# print(f"'{index}': [{row_values}], ")
|
||||
# print("}")
|
||||
def calculateProbabilite2Index(self, df, futur_cols, indic_1, indic_2):
    """Bin two indicator columns into quantiles and, for each future-return
    column, print the mean/count per bin pair and the empirical probability
    of a rise.

    Diagnostic/analysis helper: it prints its tables to stdout and mutates
    *df* in place (adds ``{indic}_bin`` and ``{col}_is_up`` columns).

    :param df: DataFrame holding ``indic_1``, ``indic_2`` and every column
               named in ``futur_cols``.
    :param futur_cols: list of future-percent column names to analyse.
    :param indic_1: first indicator column name (becomes table columns).
    :param indic_2: second indicator column name (becomes table rows).
    :return: None (results are printed).

    NOTE(review): the source's indentation was lost; the per-column nesting
    below was reconstructed from the surrounding comments — confirm it
    matches the intended behavior.
    """
    n = len(self.labels)

    # Quantile binning of both indicators into len(self.labels) buckets.
    # WARNING: pd.qcut raises ValueError when `labels` is given together
    # with duplicates='drop' and edges actually get dropped — the
    # indicators must have enough distinct values.
    df[f"{indic_1}_bin"], bins_1h = pd.qcut(df[f"{indic_1}"], q=n, labels=self.labels,
                                            retbins=True, duplicates='drop')
    df[f"{indic_2}_bin"], bins_1d = pd.qcut(df[f"{indic_2}"], q=n, labels=self.labels,
                                            retbins=True, duplicates='drop')

    # Print the bin edges formatted as copy-pastable Python lists.
    print(f"Bornes des quantiles pour {indic_1} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]")
    print(f"Bornes des quantiles pour {indic_2} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]")

    # Mean and sample count of every future column per (row-bin, col-bin).
    grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[futur_cols].agg(['mean', 'count'])

    # option_context keeps the wide display local to these prints instead of
    # mutating the global pandas options (the original used pd.set_option,
    # which leaked display settings to the whole process).
    with pd.option_context('display.max_rows', None, 'display.max_columns', None,
                           'display.width', 300):
        print(grouped.round(4))

    # Probability-of-rise tables, one per future column.
    for col in futur_cols:
        # Boolean flag: did the future return end up positive?
        df[f"{col}_is_up"] = df[col] > 0

        # Empirical probability of a rise per bin pair.
        proba_up = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[f"{col}_is_up"].mean().unstack()

        print(f"\nProbabilité de hausse pour {col} (en %):")
        with pd.option_context('display.max_rows', None, 'display.max_columns', None,
                               'display.width', 300):
            print((proba_up * 100).round(1))

            # Same values formatted as a Python dict literal, ready to paste
            # back into the strategy as a lookup table.
            df_formatted = (proba_up * 100).round(1)

            print("data = {")
            for index, row in df_formatted.iterrows():
                row_values = ", ".join([f"{val:.1f}" for val in row])
                print(f"'{index}': [{row_values}], ")
            print("}")
|
||||
|
||||
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
|
||||
# dataframe.loc[
|
||||
@@ -1145,6 +1151,16 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
# print(f"{trade.pair} current_profit={current_profit} count_of_buys={count_of_buys} pct_first={pct_first:.3f} pct_max={pct_max:.3f} lim={lim:.3f} index={index}")
|
||||
# self.pairs[trade.pair]['last_palier_index'] = index
|
||||
|
||||
# Appel de la fonction
|
||||
poly_func, x_future, y_future, count = self.polynomial_forecast(
|
||||
dataframe['sma24_deriv1_1h'],
|
||||
window=self.buy_horizon_predict_1h.value * 12,
|
||||
degree=self.buy_level_predict_1h.value,
|
||||
n_future=3)
|
||||
|
||||
if count < 3:
|
||||
return None
|
||||
|
||||
max_amount = self.config.get('stake_amount', 100) * 2.5
|
||||
# stake_amount = min(stake_amount, self.wallets.get_available_stake_amount())
|
||||
stake_amount = min(min(max_amount, self.wallets.get_available_stake_amount()),
|
||||
@@ -1200,33 +1216,33 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
|
||||
return None
|
||||
|
||||
def getProbaHausse144(self, last_candle):
    """Return the probability of a rise read from the smooth24/sma144 matrix.

    Bins the candle's 'mid_smooth_24_deriv1' and 'sma144_deriv1' values via
    the precomputed bin tables, then looks up the matching matrix cell.
    """
    col_label = self.getValuesFromTable(self.mid_smooth_24_deriv1_bins,
                                        last_candle['mid_smooth_24_deriv1'])
    row_label = self.getValuesFromTable(self.sma144_deriv1_bins,
                                        last_candle['sma144_deriv1'])
    return self.approx_val_from_bins(
        matrice=self.smooth24_sma144_deriv1_matrice_df,
        numeric_matrice=self.smooth24_sma144_deriv1_numeric_matrice,
        row_label=row_label,
        col_label=col_label)
|
||||
|
||||
def getProbaHausse1h(self, last_candle):
    """Return the probability of a rise on the 1h horizon.

    Bins the candle's 'mid_smooth_1h_deriv1' and 'sma24_deriv1_1h' values,
    then reads the matching cell of the smooth/sma-diff probability matrix.
    """
    col_label = self.getValuesFromTable(self.mid_smooth_1h_bins,
                                        last_candle['mid_smooth_1h_deriv1'])
    row_label = self.getValuesFromTable(self.sma24_deriv1_1h_bins,
                                        last_candle['sma24_deriv1_1h'])
    return self.approx_val_from_bins(
        matrice=self.smooth_smadiff_matrice_df,
        numeric_matrice=self.smooth_smadiff_numeric_matrice,
        row_label=row_label,
        col_label=col_label)
|
||||
|
||||
def getProbaHausse1d(self, last_candle):
    """Return the probability of a rise on the 1d horizon.

    NOTE(review): this bins the daily values ('mid_smooth_deriv1_1d',
    'sma5_deriv1_1d') with the 1h bin tables and the 1h probability matrix
    — presumably a deliberate reuse, but confirm it is not a copy-paste
    from getProbaHausse1h.
    """
    col_label = self.getValuesFromTable(self.mid_smooth_1h_bins,
                                        last_candle['mid_smooth_deriv1_1d'])
    row_label = self.getValuesFromTable(self.sma24_deriv1_1h_bins,
                                        last_candle['sma5_deriv1_1d'])
    return self.approx_val_from_bins(
        matrice=self.smooth_smadiff_matrice_df,
        numeric_matrice=self.smooth_smadiff_numeric_matrice,
        row_label=row_label,
        col_label=col_label)
|
||||
# def getProbaHausse144(self, last_candle):
|
||||
# value_1 = self.getValuesFromTable(self.mid_smooth_24_deriv1_bins, last_candle['mid_smooth_24_deriv1'])
|
||||
# value_2 = self.getValuesFromTable(self.sma144_deriv1_bins, last_candle['sma144_deriv1'])
|
||||
#
|
||||
# val = self.approx_val_from_bins(
|
||||
# matrice=self.smooth24_sma144_deriv1_matrice_df,
|
||||
# numeric_matrice=self.smooth24_sma144_deriv1_numeric_matrice,
|
||||
# row_label=value_2,
|
||||
# col_label=value_1)
|
||||
# return val
|
||||
#
|
||||
# def getProbaHausse1h(self, last_candle):
|
||||
# value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_1h_deriv1'])
|
||||
# value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma24_deriv1_1h'])
|
||||
#
|
||||
# val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice,
|
||||
# row_label=value_2,
|
||||
# col_label=value_1)
|
||||
# return val
|
||||
#
|
||||
# def getProbaHausse1d(self, last_candle):
|
||||
# value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_deriv1_1d'])
|
||||
# value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma5_deriv1_1d'])
|
||||
#
|
||||
# val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice, row_label=value_2,
|
||||
# col_label=value_1)
|
||||
# return val
|
||||
|
||||
def adjust_stake_amount(self, pair: str, last_candle: DataFrame):
|
||||
# Calculer le minimum des 14 derniers jours
|
||||
@@ -1289,69 +1305,69 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
#
|
||||
# Filtrer les signaux: ne prendre un signal haussier que si dérivée1 > 0 et dérivée2 > 0.
|
||||
# Détecter les zones de retournement: quand dérivée1 ≈ 0 et que dérivée2 change de signe.
|
||||
# def calculateRegression(self,
|
||||
# dataframe: DataFrame,
|
||||
# column= 'close',
|
||||
# window= 50,
|
||||
# degree=3,
|
||||
# future_offset: int = 10 # projection à n bougies après
|
||||
# ) -> DataFrame:
|
||||
# df = dataframe.copy()
|
||||
#
|
||||
# regression_fit = []
|
||||
# regression_future_fit = []
|
||||
#
|
||||
# regression_fit = []
|
||||
# regression_future_fit = []
|
||||
#
|
||||
# for i in range(len(df)):
|
||||
# if i < window:
|
||||
# regression_fit.append(np.nan)
|
||||
# regression_future_fit.append(np.nan)
|
||||
# continue
|
||||
#
|
||||
# # Fin de la fenêtre d’apprentissage
|
||||
# end_index = i
|
||||
# start_index = i - window
|
||||
# y = df[column].iloc[start_index:end_index].values
|
||||
#
|
||||
# # Si les données sont insuffisantes (juste par précaution)
|
||||
# if len(y) < window:
|
||||
# regression_fit.append(np.nan)
|
||||
# regression_future_fit.append(np.nan)
|
||||
# continue
|
||||
#
|
||||
# # x centré pour meilleure stabilité numérique
|
||||
# x = np.linspace(-1, 1, window)
|
||||
# coeffs = np.polyfit(x, y, degree)
|
||||
# poly = np.poly1d(coeffs)
|
||||
#
|
||||
# # Calcul point présent (dernier de la fenêtre)
|
||||
# x_now = x[-1]
|
||||
# regression_fit.append(poly(x_now))
|
||||
#
|
||||
# # Calcul point futur, en ajustant si on dépasse la fin
|
||||
# remaining = len(df) - i - 1
|
||||
# effective_offset = min(future_offset, remaining)
|
||||
# x_future = x_now + (effective_offset / window) * 2 # respect du même pas
|
||||
# regression_future_fit.append(poly(x_future))
|
||||
#
|
||||
# df[f"{column}_regression"] = regression_fit
|
||||
# # 2. Dérivée première = différence entre deux bougies successives
|
||||
# df[f"{column}_regression_deriv1"] = round(100 * df[f"{column}_regression"].diff() / df[f"{column}_regression"], 4)
|
||||
#
|
||||
# # 3. Dérivée seconde = différence de la dérivée première
|
||||
# df[f"{column}_regression_deriv2"] = round(10 * df[f"{column}_regression_deriv1"].rolling(int(window / 4)).mean().diff(), 4)
|
||||
#
|
||||
# df[f"{column}_future_{future_offset}"] = regression_future_fit
|
||||
#
|
||||
# # # 2. Dérivée première = différence entre deux bougies successives
|
||||
# # df[f"{column}_future_{future_offset}_deriv1"] = round(100 * df[f"{column}_future_{future_offset}"].diff() / df[f"{column}_future_{future_offset}"], 4)
|
||||
# #
|
||||
# # # 3. Dérivée seconde = différence de la dérivée première
|
||||
# # df[f"{column}_future_{future_offset}_deriv2"] = round(10 * df[f"{column}_future_{future_offset}_deriv1"].rolling(int(window / 4)).mean().diff(), 4)
|
||||
#
|
||||
# return df
|
||||
def calculateRegression(self,
                        dataframe: DataFrame,
                        column='close',
                        window=50,
                        degree=3,
                        future_offset: int = 10  # projection n candles ahead
                        ) -> DataFrame:
    """Rolling polynomial regression of *column* plus a forward projection.

    For every row i >= window, a degree-*degree* polynomial is fitted on the
    *window* values strictly before row i (x centred on [-1, 1] for numerical
    stability).  The fit evaluated at the window's last grid point becomes
    ``{column}_regression``; the fit extrapolated *future_offset* candles
    ahead (clamped so it never projects past the end of the frame) becomes
    ``{column}_future_{future_offset}``.  First and second derivatives of
    the regression are added as ``..._regression_deriv1`` / ``..._deriv2``.

    :param dataframe: input frame; a copy is returned, the input is untouched.
    :param column: name of the column to regress.
    :param window: number of past values used for each fit (>= 2 expected).
    :param degree: polynomial degree.
    :param future_offset: how many candles ahead to project.
    :return: copy of *dataframe* with the regression columns added.
    """
    df = dataframe.copy()

    # (The original initialized these two lists twice; once is enough.)
    regression_fit = []
    regression_future_fit = []

    # x grid shared by every window, hoisted out of the loop: `window`
    # points centred on [-1, 1]; the spacing between two candles on this
    # grid is 2 / (window - 1).
    x = np.linspace(-1, 1, window)
    x_step = 2 / (window - 1) if window > 1 else 0.0

    for i in range(len(df)):
        # Not enough history yet for a full window.
        if i < window:
            regression_fit.append(np.nan)
            regression_future_fit.append(np.nan)
            continue

        # Learning window: the `window` values strictly before row i.
        y = df[column].iloc[i - window:i].values

        # Safety net in case the slice came back short.
        if len(y) < window:
            regression_fit.append(np.nan)
            regression_future_fit.append(np.nan)
            continue

        coeffs = np.polyfit(x, y, degree)
        poly = np.poly1d(coeffs)

        # Present point: last point of the fitted window.
        x_now = x[-1]
        regression_fit.append(poly(x_now))

        # Future point, clamping the offset at the end of the frame.
        remaining = len(df) - i - 1
        effective_offset = min(future_offset, remaining)
        # BUGFIX: the original advanced by 2 / window per candle, which is
        # not the grid spacing of linspace(-1, 1, window); use the true
        # per-candle step 2 / (window - 1) so the projection really moves
        # `effective_offset` candles ahead, as the comment intended.
        x_future = x_now + effective_offset * x_step
        regression_future_fit.append(poly(x_future))

    df[f"{column}_regression"] = regression_fit
    # First derivative: percent change between successive regression values.
    df[f"{column}_regression_deriv1"] = round(100 * df[f"{column}_regression"].diff() / df[f"{column}_regression"], 4)

    # Second derivative: change of the smoothed first derivative.
    df[f"{column}_regression_deriv2"] = round(10 * df[f"{column}_regression_deriv1"].rolling(int(window / 4)).mean().diff(), 4)

    df[f"{column}_future_{future_offset}"] = regression_future_fit

    return df
|
||||
|
||||
def getValuesFromTable(self, values, value):
|
||||
for i in range(len(values) - 1):
|
||||
@@ -1645,3 +1661,69 @@ class Zeus_8_3_2_B_4_2(IStrategy):
|
||||
# print(pct)
|
||||
return i
|
||||
return None # Aucun palier atteint
|
||||
|
||||
# def poly_regression_predictions(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3) -> pd.DataFrame:
|
||||
# """
|
||||
# Renvoie une DataFrame avec `n_future` colonnes contenant les extrapolations des n prochains points
|
||||
# selon une régression polynomiale ajustée sur les `window` dernières valeurs.
|
||||
# """
|
||||
# result = pd.DataFrame(index=series.index)
|
||||
# x = np.arange(window)
|
||||
#
|
||||
# for future_step in range(1, n_future + 1):
|
||||
# result[f'poly_pred_t+{future_step}'] = np.nan
|
||||
#
|
||||
# for i in range(window - 1, len(series)):
|
||||
# y = series.iloc[i - window + 1 : i + 1].values
|
||||
#
|
||||
# if np.any(pd.isna(y)):
|
||||
# continue
|
||||
#
|
||||
# coeffs = np.polyfit(x, y, degree)
|
||||
# poly = np.poly1d(coeffs)
|
||||
#
|
||||
# for future_step in range(1, n_future + 1):
|
||||
# future_x = window - 1 + future_step # Extrapolation point
|
||||
# result.loc[series.index[i], f'poly_pred_t+{future_step}'] = poly(future_x)
|
||||
#
|
||||
# return result
|
||||
|
||||
def polynomial_forecast(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3):
    """Fit a polynomial on the last *window* values and extrapolate it.

    Fits a degree-*degree* polynomial to the last *window* values of
    *series*, predicts the next *n_future* values, and additionally counts
    how many of the fixed horizons t+12, t+24 and t+36 candles have a
    positive prediction.

    :param series: pandas Series (e.g. dataframe['close']).
    :param window: number of recent values the polynomial is fitted on.
    :param degree: polynomial degree (e.g. 2 for quadratic).
    :param n_future: number of future values to predict.
    :raises ValueError: if the series is shorter than *window*.
    :return: tuple ``(poly, x_future, y_future, count)`` — the fitted
             np.poly1d, the n_future future x positions, their predicted
             values, and ``count`` in [0, 3]: how many of the fixed
             horizons predict a positive value.  (The original docstring
             claimed a 3-tuple; the function has always returned four.)
    """
    if len(series) < window:
        raise ValueError("La série est trop courte pour la fenêtre spécifiée.")

    recent_y = series.iloc[-window:].values
    x = np.arange(window)

    coeffs = np.polyfit(x, recent_y, degree)
    poly = np.poly1d(coeffs)

    x_future = np.arange(window, window + n_future)
    y_future = poly(x_future)

    # NOTE(review): these horizons are hard-coded (12/24/36 candles) and do
    # NOT depend on n_future — an earlier revision iterated
    # range(1, n_future + 1).  Confirm this is intentional before reuse.
    count = 0
    for future_step in [12, 24, 36]:
        future_x = window - 1 + future_step
        prediction = poly(future_x)
        if prediction > 0:
            count += 1

    return poly, x_future, y_future, count
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user