Calcul 20240101-20250514 1082$ 243.819$ => 4,452

This commit is contained in:
Jérôme Delacotte
2025-05-24 16:52:39 +02:00
parent 4c963b7e7c
commit 05815270ae
2 changed files with 1669 additions and 1698 deletions

View File

@@ -349,8 +349,11 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# ========================================================================= # =========================================================================
# Parameters hyperopt # Parameters hyperopt
buy_mid_smooth_3_deriv1 = DecimalParameter(-0.1, 0.1, decimals=2, default=-0.06, space='buy') # buy_mid_smooth_3_deriv1 = DecimalParameter(-0.1, 0.1, decimals=2, default=-0.06, space='buy')
buy_mid_smooth_24_deriv1 = DecimalParameter(-0.6, 0, decimals=2, default=-0.03, space='buy') # buy_mid_smooth_24_deriv1 = DecimalParameter(-0.6, 0, decimals=2, default=-0.03, space='buy')
buy_horizon_predict_1h = IntParameter(1, 12, default=2, space='buy')
buy_level_predict_1h = IntParameter(2, 5, default=4, space='buy')
def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float, time_in_force: str, def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float, time_in_force: str,
current_time: datetime, entry_tag: Optional[str], **kwargs) -> bool: current_time: datetime, entry_tag: Optional[str], **kwargs) -> bool:
@@ -733,6 +736,8 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# informative['deriv1'] = 100 * informative['deriv1'] / informative['mid'] # informative['deriv1'] = 100 * informative['deriv1'] / informative['mid']
# informative['deriv2'] = 1000 * informative['deriv2'] / informative['mid'] # informative['deriv2'] = 1000 * informative['deriv2'] / informative['mid']
# poly_func, x_future, y_future, count = self.polynomial_forecast(informative['sma5_deriv1_1d'], window=24, degree=4)
dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1d", ffill=True) dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "1d", ffill=True)
dataframe['last_price'] = dataframe['close'] dataframe['last_price'] = dataframe['close']
@@ -815,15 +820,16 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# dataframe['percent_with_previous_day'] = 100 * (dataframe['close'] - dataframe['close_1d']) / dataframe['close'] # dataframe['percent_with_previous_day'] = 100 * (dataframe['close'] - dataframe['close_1d']) / dataframe['close']
# dataframe['percent_with_max_hour'] = 100 * (dataframe['close'] - dataframe['max12_1h']) / dataframe['close'] # dataframe['percent_with_max_hour'] = 100 * (dataframe['close'] - dataframe['max12_1h']) / dataframe['close']
# #
# horizon_h = 24 * 5
# dataframe['futur_percent_1h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-12) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_1h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-12) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
# dataframe['futur_percent_3h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-36) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_3h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-36) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
# dataframe['futur_percent_5h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-60) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_5h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-60) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
# dataframe['futur_percent_12h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-144) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean() # dataframe['futur_percent_12h'] = 100 * ((dataframe['mid_smooth_1h'].shift(-144) - dataframe['mid_smooth_1h']) / dataframe['mid_smooth_1h']).rolling(horizon_h).mean()
#
# dataframe['futur_percent_1d'] = 100 * (dataframe['close'].shift(-1) - dataframe['close']) / dataframe['close'] # dataframe['futur_percent_1d'] = 100 * (dataframe['close'].shift(-1) - dataframe['close']) / dataframe['close']
# dataframe['futur_percent_3d'] = 100 * (dataframe['close'].shift(-3) - dataframe['close']) / dataframe['close'] # dataframe['futur_percent_3d'] = 100 * (dataframe['close'].shift(-3) - dataframe['close']) / dataframe['close']
#
# self.calculateProbabilite2Index(dataframe, ['futur_percent_12h'], 'mid_smooth_deriv1_1d', 'sma24_deriv1_1h') # self.calculateProbabilite2Index(dataframe, ['futur_percent_1d'], 'sma24_deriv1_1h', 'sma5_1d')
return dataframe return dataframe
@@ -915,65 +921,65 @@ class Zeus_8_3_2_B_4_2(IStrategy):
return dataframe return dataframe
# def calculateProbabilite2Index(self, df, futur_cols, indic_1, indic_2): def calculateProbabilite2Index(self, df, futur_cols, indic_1, indic_2):
# # # Définition des tranches pour les dérivées # # Définition des tranches pour les dérivées
# # bins_deriv = [-np.inf, -0.05, -0.01, 0.01, 0.05, np.inf] # bins_deriv = [-np.inf, -0.05, -0.01, 0.01, 0.05, np.inf]
# # labels = ['forte baisse', 'légère baisse', 'neutre', 'légère hausse', 'forte hausse'] # labels = ['forte baisse', 'légère baisse', 'neutre', 'légère hausse', 'forte hausse']
# # #
# # # Ajout des colonnes bin (catégorisation) # # Ajout des colonnes bin (catégorisation)
# # df[f"{indic_1}_bin"] = pd.cut(df['mid_smooth_1h_deriv1'], bins=bins_deriv, labels=labels) # df[f"{indic_1}_bin"] = pd.cut(df['mid_smooth_1h_deriv1'], bins=bins_deriv, labels=labels)
# # df[f"{indic_2}_bin"] = pd.cut(df['mid_smooth_deriv1_1d'], bins=bins_deriv, labels=labels) # df[f"{indic_2}_bin"] = pd.cut(df['mid_smooth_deriv1_1d'], bins=bins_deriv, labels=labels)
# # #
# # # Colonnes de prix futur à analyser # # Colonnes de prix futur à analyser
# # futur_cols = ['futur_percent_1h', 'futur_percent_2h', 'futur_percent_3h', 'futur_percent_4h', 'futur_percent_5h'] # futur_cols = ['futur_percent_1h', 'futur_percent_2h', 'futur_percent_3h', 'futur_percent_4h', 'futur_percent_5h']
# # #
# # # Calcul des moyennes et des effectifs # # Calcul des moyennes et des effectifs
# # grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"])[futur_cols].agg(['mean', 'count']) # grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"])[futur_cols].agg(['mean', 'count'])
# # #
# # pd.set_option('display.width', 200) # largeur max affichage # pd.set_option('display.width', 200) # largeur max affichage
# # pd.set_option('display.max_columns', None)
# pd.set_option('display.max_columns', None) # pd.set_option('display.max_columns', None)
# pd.set_option('display.width', 300) # largeur max affichage pd.set_option('display.max_columns', None)
# pd.set_option('display.width', 300) # largeur max affichage
# # nettoyage
# # series = df[f"{indic_2}"].dropna() # nettoyage
# # unique_vals = df[f"{indic_2}"].nunique() # series = df[f"{indic_2}"].dropna()
# # print(unique_vals) # unique_vals = df[f"{indic_2}"].nunique()
# # print(df[f"{indic_2}"]) # print(unique_vals)
# n = len(self.labels) # print(df[f"{indic_2}"])
# n = len(self.labels)
# df[f"{indic_1}_bin"], bins_1h = pd.qcut(df[f"{indic_1}"], q=n, labels=self.labels, retbins=True,
# duplicates='drop') df[f"{indic_1}_bin"], bins_1h = pd.qcut(df[f"{indic_1}"], q=n, labels=self.labels, retbins=True,
# df[f"{indic_2}_bin"], bins_1d = pd.qcut(df[f"{indic_2}"], q=n, labels=self.labels, retbins=True, duplicates='drop')
# duplicates='drop') df[f"{indic_2}_bin"], bins_1d = pd.qcut(df[f"{indic_2}"], q=n, labels=self.labels, retbins=True,
# # Affichage formaté pour code Python duplicates='drop')
# print(f"Bornes des quantiles pour {indic_1} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]") # Affichage formaté pour code Python
# print(f"Bornes des quantiles pour {indic_2} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]") print(f"Bornes des quantiles pour {indic_1} : [{', '.join([f'{b:.4f}' for b in bins_1h])}]")
# # Agrégation print(f"Bornes des quantiles pour {indic_2} : [{', '.join([f'{b:.4f}' for b in bins_1d])}]")
# grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[futur_cols].agg(['mean', 'count']) # Agrégation
# # Affichage grouped = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[futur_cols].agg(['mean', 'count'])
# with pd.option_context('display.max_rows', None, 'display.max_columns', None): # Affichage
# print(grouped.round(4)) with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# # Ajout des probabilités de hausse print(grouped.round(4))
# for col in futur_cols: # Ajout des probabilités de hausse
# df[f"{col}_is_up"] = df[col] > 0 for col in futur_cols:
# df[f"{col}_is_up"] = df[col] > 0
# # Calcul de la proba de hausse
# proba_up = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[f"{col}_is_up"].mean().unstack() # Calcul de la proba de hausse
# proba_up = df.groupby([f"{indic_2}_bin", f"{indic_1}_bin"], observed=True)[f"{col}_is_up"].mean().unstack()
# print(f"\nProbabilité de hausse pour {col} (en %):")
# with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(f"\nProbabilité de hausse pour {col} (en %):")
# print((proba_up * 100).round(1)) with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print((proba_up * 100).round(1))
# # Affichage formaté des valeurs comme tableau Python
# with pd.option_context('display.max_rows', None, 'display.max_columns', None): # Affichage formaté des valeurs comme tableau Python
# df_formatted = (proba_up * 100).round(1) with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# df_formatted = (proba_up * 100).round(1)
# print("data = {")
# for index, row in df_formatted.iterrows(): print("data = {")
# row_values = ", ".join([f"{val:.1f}" for val in row]) for index, row in df_formatted.iterrows():
# print(f"'{index}': [{row_values}], ") row_values = ", ".join([f"{val:.1f}" for val in row])
# print("}") print(f"'{index}': [{row_values}], ")
print("}")
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
# dataframe.loc[ # dataframe.loc[
@@ -1145,6 +1151,16 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# print(f"{trade.pair} current_profit={current_profit} count_of_buys={count_of_buys} pct_first={pct_first:.3f} pct_max={pct_max:.3f} lim={lim:.3f} index={index}") # print(f"{trade.pair} current_profit={current_profit} count_of_buys={count_of_buys} pct_first={pct_first:.3f} pct_max={pct_max:.3f} lim={lim:.3f} index={index}")
# self.pairs[trade.pair]['last_palier_index'] = index # self.pairs[trade.pair]['last_palier_index'] = index
# Appel de la fonction
poly_func, x_future, y_future, count = self.polynomial_forecast(
dataframe['sma24_deriv1_1h'],
window=self.buy_horizon_predict_1h.value * 12,
degree=self.buy_level_predict_1h.value,
n_future=3)
if count < 3:
return None
max_amount = self.config.get('stake_amount', 100) * 2.5 max_amount = self.config.get('stake_amount', 100) * 2.5
# stake_amount = min(stake_amount, self.wallets.get_available_stake_amount()) # stake_amount = min(stake_amount, self.wallets.get_available_stake_amount())
stake_amount = min(min(max_amount, self.wallets.get_available_stake_amount()), stake_amount = min(min(max_amount, self.wallets.get_available_stake_amount()),
@@ -1200,33 +1216,33 @@ class Zeus_8_3_2_B_4_2(IStrategy):
return None return None
def getProbaHausse144(self, last_candle): # def getProbaHausse144(self, last_candle):
value_1 = self.getValuesFromTable(self.mid_smooth_24_deriv1_bins, last_candle['mid_smooth_24_deriv1']) # value_1 = self.getValuesFromTable(self.mid_smooth_24_deriv1_bins, last_candle['mid_smooth_24_deriv1'])
value_2 = self.getValuesFromTable(self.sma144_deriv1_bins, last_candle['sma144_deriv1']) # value_2 = self.getValuesFromTable(self.sma144_deriv1_bins, last_candle['sma144_deriv1'])
#
val = self.approx_val_from_bins( # val = self.approx_val_from_bins(
matrice=self.smooth24_sma144_deriv1_matrice_df, # matrice=self.smooth24_sma144_deriv1_matrice_df,
numeric_matrice=self.smooth24_sma144_deriv1_numeric_matrice, # numeric_matrice=self.smooth24_sma144_deriv1_numeric_matrice,
row_label=value_2, # row_label=value_2,
col_label=value_1) # col_label=value_1)
return val # return val
#
def getProbaHausse1h(self, last_candle): # def getProbaHausse1h(self, last_candle):
value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_1h_deriv1']) # value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_1h_deriv1'])
value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma24_deriv1_1h']) # value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma24_deriv1_1h'])
#
val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice, # val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice,
row_label=value_2, # row_label=value_2,
col_label=value_1) # col_label=value_1)
return val # return val
#
def getProbaHausse1d(self, last_candle): # def getProbaHausse1d(self, last_candle):
value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_deriv1_1d']) # value_1 = self.getValuesFromTable(self.mid_smooth_1h_bins, last_candle['mid_smooth_deriv1_1d'])
value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma5_deriv1_1d']) # value_2 = self.getValuesFromTable(self.sma24_deriv1_1h_bins, last_candle['sma5_deriv1_1d'])
#
val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice, row_label=value_2, # val = self.approx_val_from_bins(matrice=self.smooth_smadiff_matrice_df, numeric_matrice=self.smooth_smadiff_numeric_matrice, row_label=value_2,
col_label=value_1) # col_label=value_1)
return val # return val
def adjust_stake_amount(self, pair: str, last_candle: DataFrame): def adjust_stake_amount(self, pair: str, last_candle: DataFrame):
# Calculer le minimum des 14 derniers jours # Calculer le minimum des 14 derniers jours
@@ -1289,69 +1305,69 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# #
# Filtrer les signaux: ne prendre un signal haussier que si dérivée1 > 0 et dérivée2 > 0. # Filtrer les signaux: ne prendre un signal haussier que si dérivée1 > 0 et dérivée2 > 0.
# Détecter les zones de retournement: quand dérivée1 ≈ 0 et que dérivée2 change de signe. # Détecter les zones de retournement: quand dérivée1 ≈ 0 et que dérivée2 change de signe.
# def calculateRegression(self, def calculateRegression(self,
# dataframe: DataFrame, dataframe: DataFrame,
# column= 'close', column= 'close',
# window= 50, window= 50,
# degree=3, degree=3,
# future_offset: int = 10 # projection à n bougies après future_offset: int = 10 # projection à n bougies après
# ) -> DataFrame: ) -> DataFrame:
# df = dataframe.copy() df = dataframe.copy()
#
# regression_fit = [] regression_fit = []
# regression_future_fit = [] regression_future_fit = []
#
# regression_fit = [] regression_fit = []
# regression_future_fit = [] regression_future_fit = []
#
# for i in range(len(df)): for i in range(len(df)):
# if i < window: if i < window:
# regression_fit.append(np.nan) regression_fit.append(np.nan)
# regression_future_fit.append(np.nan) regression_future_fit.append(np.nan)
# continue continue
#
# # Fin de la fenêtre dapprentissage # Fin de la fenêtre dapprentissage
# end_index = i end_index = i
# start_index = i - window start_index = i - window
# y = df[column].iloc[start_index:end_index].values y = df[column].iloc[start_index:end_index].values
#
# # Si les données sont insuffisantes (juste par précaution) # Si les données sont insuffisantes (juste par précaution)
# if len(y) < window: if len(y) < window:
# regression_fit.append(np.nan) regression_fit.append(np.nan)
# regression_future_fit.append(np.nan) regression_future_fit.append(np.nan)
# continue continue
#
# # x centré pour meilleure stabilité numérique # x centré pour meilleure stabilité numérique
# x = np.linspace(-1, 1, window) x = np.linspace(-1, 1, window)
# coeffs = np.polyfit(x, y, degree) coeffs = np.polyfit(x, y, degree)
# poly = np.poly1d(coeffs) poly = np.poly1d(coeffs)
#
# # Calcul point présent (dernier de la fenêtre) # Calcul point présent (dernier de la fenêtre)
# x_now = x[-1] x_now = x[-1]
# regression_fit.append(poly(x_now)) regression_fit.append(poly(x_now))
#
# # Calcul point futur, en ajustant si on dépasse la fin # Calcul point futur, en ajustant si on dépasse la fin
# remaining = len(df) - i - 1 remaining = len(df) - i - 1
# effective_offset = min(future_offset, remaining) effective_offset = min(future_offset, remaining)
# x_future = x_now + (effective_offset / window) * 2 # respect du même pas x_future = x_now + (effective_offset / window) * 2 # respect du même pas
# regression_future_fit.append(poly(x_future)) regression_future_fit.append(poly(x_future))
#
# df[f"{column}_regression"] = regression_fit df[f"{column}_regression"] = regression_fit
# 2. Dérivée première = différence entre deux bougies successives
df[f"{column}_regression_deriv1"] = round(100 * df[f"{column}_regression"].diff() / df[f"{column}_regression"], 4)
# 3. Dérivée seconde = différence de la dérivée première
df[f"{column}_regression_deriv2"] = round(10 * df[f"{column}_regression_deriv1"].rolling(int(window / 4)).mean().diff(), 4)
df[f"{column}_future_{future_offset}"] = regression_future_fit
# # 2. Dérivée première = différence entre deux bougies successives # # 2. Dérivée première = différence entre deux bougies successives
# df[f"{column}_regression_deriv1"] = round(100 * df[f"{column}_regression"].diff() / df[f"{column}_regression"], 4) # df[f"{column}_future_{future_offset}_deriv1"] = round(100 * df[f"{column}_future_{future_offset}"].diff() / df[f"{column}_future_{future_offset}"], 4)
# #
# # 3. Dérivée seconde = différence de la dérivée première # # 3. Dérivée seconde = différence de la dérivée première
# df[f"{column}_regression_deriv2"] = round(10 * df[f"{column}_regression_deriv1"].rolling(int(window / 4)).mean().diff(), 4) # df[f"{column}_future_{future_offset}_deriv2"] = round(10 * df[f"{column}_future_{future_offset}_deriv1"].rolling(int(window / 4)).mean().diff(), 4)
#
# df[f"{column}_future_{future_offset}"] = regression_future_fit return df
#
# # # 2. Dérivée première = différence entre deux bougies successives
# # df[f"{column}_future_{future_offset}_deriv1"] = round(100 * df[f"{column}_future_{future_offset}"].diff() / df[f"{column}_future_{future_offset}"], 4)
# #
# # # 3. Dérivée seconde = différence de la dérivée première
# # df[f"{column}_future_{future_offset}_deriv2"] = round(10 * df[f"{column}_future_{future_offset}_deriv1"].rolling(int(window / 4)).mean().diff(), 4)
#
# return df
def getValuesFromTable(self, values, value): def getValuesFromTable(self, values, value):
for i in range(len(values) - 1): for i in range(len(values) - 1):
@@ -1645,3 +1661,69 @@ class Zeus_8_3_2_B_4_2(IStrategy):
# print(pct) # print(pct)
return i return i
return None # Aucun palier atteint return None # Aucun palier atteint
# def poly_regression_predictions(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3) -> pd.DataFrame:
# """
# Renvoie une DataFrame avec `n_future` colonnes contenant les extrapolations des n prochains points
# selon une régression polynomiale ajustée sur les `window` dernières valeurs.
# """
# result = pd.DataFrame(index=series.index)
# x = np.arange(window)
#
# for future_step in range(1, n_future + 1):
# result[f'poly_pred_t+{future_step}'] = np.nan
#
# for i in range(window - 1, len(series)):
# y = series.iloc[i - window + 1 : i + 1].values
#
# if np.any(pd.isna(y)):
# continue
#
# coeffs = np.polyfit(x, y, degree)
# poly = np.poly1d(coeffs)
#
# for future_step in range(1, n_future + 1):
# future_x = window - 1 + future_step # Extrapolation point
# result.loc[series.index[i], f'poly_pred_t+{future_step}'] = poly(future_x)
#
# return result
def polynomial_forecast(self, series: pd.Series, window: int = 20, degree: int = 2, n_future: int = 3):
    """Fit a polynomial to the last `window` values of `series` and extrapolate.

    A degree-`degree` polynomial is fitted (least squares) on the most recent
    `window` points, then evaluated on the `n_future` points immediately after
    the window. Additionally, the fitted polynomial is probed at three fixed
    horizons (12, 24 and 36 steps past the window end — i.e. 1h/2h/3h on a
    5-minute timeframe) and `count` reports how many of those probes are
    strictly positive; callers use `count` as a bullishness score.

    :param series: pandas Series (e.g. dataframe['close'] or a derivative column)
    :param window: number of most-recent values used to fit the polynomial
    :param degree: polynomial degree (e.g. 2 for quadratic)
    :param n_future: number of future points returned in `y_future`
    :return: tuple (poly, x_future, y_future, count) where
             - poly: the fitted np.poly1d callable,
             - x_future: np.ndarray of the `n_future` x positions after the window,
             - y_future: np.ndarray of the polynomial evaluated at x_future,
             - count: int in [0, 3], number of fixed-horizon probes with value > 0
    :raises ValueError: if the series is shorter than `window`
    """
    if len(series) < window:
        raise ValueError("La série est trop courte pour la fenêtre spécifiée.")

    # Fit on x = 0..window-1 against the most recent `window` values.
    recent_y = series.iloc[-window:].values
    x = np.arange(window)
    coeffs = np.polyfit(x, recent_y, degree)
    poly = np.poly1d(coeffs)

    # Extrapolate the n_future points immediately after the window.
    x_future = np.arange(window, window + n_future)
    y_future = poly(x_future)

    # Fixed horizons (12/24/36 candles past the last fitted point) used as a
    # simple up-trend score, independent of n_future.
    count = 0
    for future_step in (12, 24, 36):
        future_x = window - 1 + future_step
        if poly(future_x) > 0:
            count += 1

    return poly, x_future, y_future, count

File diff suppressed because it is too large Load Diff