Upload 4 files
Browse files- .gitattributes +1 -0
- cfg.py +170 -0
- cleaned_training.csv +3 -0
- submission.csv +0 -0
- train.py +168 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
57 |
# Video files - compressed
|
58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
60 |
+
cleaned_training.csv filter=lfs diff=lfs merge=lfs -text
|
cfg.py
ADDED
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Columns excluded from the feature matrix before training (see train.py:
# `combined.drop(DROP_LIST)`).  Every entry must match a CSV column header
# byte-for-byte, so the Chinese names are kept exactly as in the dataset.
# Fix applied: '個股前3天成交量' previously contained U+FFFD replacement
# characters ('成���量') from a mis-encoded save, which would never match
# the real column name.
DROP_LIST = [
    'ID',
    # Foreign-brokerage branch net buy/sell: today and 1..20 days back.
    '外資券商_分點進出',
    '外資券商_前1天分點進出',
    '外資券商_前2天分點進出',
    '外資券商_前3天分點進出',
    '外資券商_前4天分點進出',
    '外資券商_前5天分點進出',
    '外資券商_前6天分點進出',
    '外資券商_前7天分點進出',
    '外資券商_前8天分點進出',
    '外資券商_前9天分點進出',
    '外資券商_前10天分點進出',
    '外資券商_前11天分點進出',
    '外資券商_前12天分點進出',
    '外資券商_前13天分點進出',
    '外資券商_前14天分點進出',
    '外資券商_前15天分點進出',
    '外資券商_前16天分點進出',
    '外資券商_前17天分點進出',
    '外資券商_前18天分點進出',
    '外資券商_前19天分點進出',
    '外資券商_前20天分點進出',
    # Major-brokerage branch net buy/sell: today and 1..20 days back.
    '主力券商_分點進出',
    '主力券商_前1天分點進出',
    '主力券商_前2天分點進出',
    '主力券商_前3天分點進出',
    '主力券商_前4天分點進出',
    '主力券商_前5天分點進出',
    '主力券商_前6天分點進出',
    '主力券商_前7天分點進出',
    '主力券商_前8天分點進出',
    '主力券商_前9天分點進出',
    '主力券商_前10天分點進出',
    '主力券商_前11天分點進出',
    '主力券商_前12天分點進出',
    '主力券商_前13天分點進出',
    '主力券商_前14天分點進出',
    '主力券商_前15天分點進出',
    '主力券商_前16天分點進出',
    '主力券商_前17天分點進出',
    '主力券商_前18天分點進出',
    '主力券商_前19天分點進出',
    '主力券商_前20天分點進出',
    # Daily foreign-investor flow/holdings columns.
    '日外資_外資買張',
    '日外資_外資賣張',
    '日外資_外資買賣超',
    '日外資_外資持股異動',
    '日外資_外資持股張數',
    '日外資_外資買金額(千)',
    '日外資_外資賣金額(千)',
    '日外資_外資買賣超金額(千)',
    '日外資_外資持股比率(%)',
    '日外資_外資持股市值(百萬)',
    '日外資_外資尚可投資張數',
    '日外資_外資尚可投資比率(%)',
    '日外資_外資投資上限比率(%)',
    # Daily proprietary-dealer flow/holdings columns.
    '日自營_自營商買張',
    '日自營_自營商賣張',
    '日自營_自營商買賣超',
    '日自營_自營商買張(自行買賣)',
    '日自營_自營商賣張(自行買賣)',
    '日自營_自營商買賣超(自行買賣)',
    '日自營_自營商買張(避險)',
    '日自營_自營商賣張(避險)',
    '日自營_自營商買賣超(避險)',
    '日自營_自營商庫存',
    '日自營_自營商買金額(千)',
    '日自營_自營商賣金額(千)',
    '日自營_自營商買賣超金額(千)',
    '日自營_自營商持股比率(%)',
    '日自營_自營商持股市值(百萬)',
    # Daily investment-trust flow/holdings columns.
    '日投信_投信買張',
    '日投信_投信賣張',
    '日投信_投信買賣超',
    '日投信_投信庫存',
    '日投信_投信買金額(千)',
    '日投信_投信賣金額(千)',
    '日投信_投信買賣超金額(千)',
    '日投信_投信持股比率(%)',
    '日投信_投信持股市值(百萬)',
    # Technical-indicator columns (weekly and daily variants).
    '技術指標_週MACD',
    '技術指標_週DIF-週MACD',
    '技術指標_週DIF',
    '技術指標_週-DI(14)',
    '技術指標_週ADX(14)',
    '技術指標_週+DI(14)',
    '技術指標_相對強弱比(週)',
    '技術指標_相對強弱比(日)',
    '技術指標_近一月歷史波動率(%)',
    '技術指標_乖離率(20日)',
    '技術指標_RSI(5)',
    '技術指標_RSI(10)',
    '技術指標_MACD',
    '技術指標_K(9)',
    '技術指標_EWMA波動率(%)',
    '技術指標_DIF-MACD',
    '技術指標_DIF',
    '技術指標_+DI(14)',
    '技術指標_-DI(14)',
    '技術指標_D(9)',
    '技術指標_Beta係數(21D)',
    '技術指標_ADX(14)',
    '技術指標_保力加通道–頂部(20)',
    '技術指標_保力加通道–均線(20)',
    '技術指標_保力加通道–底部(20)',
    '技術指標_SAR',
    '技術指標_TR(1)',
    '技術指標_ADXR(14)',
    '技術指標_+DM(14)',
    '技術指標_-DM(14)',
    '技術指標_週TR(14)',
    '技術指標_週+DM(14)',
    '技術指標_週-DM(14)',
    # Target column — dropped from X; train.py reads it separately as y.
    "飆股",
    # Per-stock close price: today and 1..20 days back.
    '個股收盤價',
    '個股前1天收盤價',
    '個股前2天收盤價',
    '個股前3天收盤價',
    '個股前4天收盤價',
    '個股前5天收盤價',
    '個股前6天收盤價',
    '個股前7天收盤價',
    '個股前8天收盤價',
    '個股前9天收盤價',
    '個股前10天收盤價',
    '個股前11天收盤價',
    '個股前12天收盤價',
    '個股前13天收盤價',
    '個股前14天收盤價',
    '個股前15天收盤價',
    '個股前16天收盤價',
    '個股前17天收盤價',
    '個股前18天收盤價',
    '個股前19天收盤價',
    '個股前20天收盤價',
    # Per-stock return / volatility / bias-ratio windows.
    '個股1天報酬率',
    '個股5天報酬率',
    '個股10天報酬率',
    '個股20天報酬率',
    '個股5天波動度',
    '個股10天波動度',
    '個股20天波動度',
    '個股5天乖離率',
    '個股10天乖離率',
    # NOTE(review): '19天' breaks the 5/10/20 pattern of the surrounding
    # windows — presumably the dataset column really is named this way;
    # confirm against the CSV header before "fixing" it.
    '個股19天乖離率',
    # Per-stock volume: today and 1..20 days back.
    '個股成交量',
    '個股前1天成交量',
    '個股前2天成交量',
    '個股前3天成交量',
    '個股前4天成交量',
    '個股前5天成交量',
    '個股前6天成交量',
    '個股前7天成交量',
    '個股前8天成交量',
    '個股前9天成交量',
    '個股前10天成交量',
    '個股前11天成交量',
    '個股前12天成交量',
    '個股前13天成交量',
    '個股前14天成交量',
    '個股前15天成交量',
    '個股前16天成交量',
    '個股前17天成交量',
    '個股前18天成交量',
    '個股前19天成交量',
    '個股前20天成交量',
    # Per-stock volume-volatility windows.
    '個股5天成交量波動度',
    '個股10天成交量波動度',
    '個股20天成交量波動度',
]
|
cleaned_training.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f00e1b54b7230e0949d2ac31034cfa99017698c6df29c9349b6020f58b6d8b65
|
3 |
+
size 311126925
|
submission.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
train.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import polars as pl
|
2 |
+
import tsfel
|
3 |
+
import numpy as np
|
4 |
+
from tqdm import tqdm
|
5 |
+
import warnings
|
6 |
+
import os
|
7 |
+
import optuna # Added import for Optuna
|
8 |
+
from sklearn.model_selection import train_test_split, cross_val_score
|
9 |
+
from sklearn.pipeline import Pipeline
|
10 |
+
from sklearn.compose import ColumnTransformer
|
11 |
+
from sklearn.preprocessing import FunctionTransformer
|
12 |
+
from sklearn.base import BaseEstimator, TransformerMixin
|
13 |
+
from xgboost import XGBClassifier
|
14 |
+
from lightgbm import LGBMClassifier
|
15 |
+
|
16 |
+
from cfg import DROP_LIST
|
17 |
+
|
18 |
+
# Rest of the imports and class definitions remain unchanged...
|
19 |
+
|
20 |
+
def load_and_balance_data(filename, ratio=1/30, seed=None):
    """
    Load a CSV file and down-sample the negative class to reduce imbalance.

    All positive rows (飆股 == 1) are kept; only a fraction of the negative
    rows (飆股 == 0) survive the sampling.

    Args:
        filename (str): Path to the CSV file.
        ratio (float): Fraction of negative rows to keep (0 < ratio <= 1).
        seed (int | None): Seed for the negative-class sample. Pass an int to
            make the balancing reproducible across runs; the default ``None``
            preserves the original non-deterministic behaviour.

    Returns:
        pl.DataFrame: Positive rows stacked on top of the sampled negatives.
    """
    print(f"Ratio: {ratio}")

    data = pl.read_csv(filename)

    # "飆股" is the binary target column: 1 = breakout stock, 0 = not.
    positive = data.filter(pl.col("飆股") == 1)
    negative = data.filter(pl.col("飆股") == 0)

    # Thin out the (much larger) negative class; fix: previously unseeded,
    # which made every run train on a different negative sample.
    negative = negative.sample(fraction=ratio, seed=seed)

    # Stack positives first, then the sampled negatives.
    combined = positive.vstack(negative)

    return combined
|
46 |
+
|
47 |
+
def create_stock_prediction_pipeline(params=None):
    """
    Build the sklearn Pipeline used for training and prediction.

    Args:
        params (dict | None): Extra keyword arguments for ``XGBClassifier``
            (e.g. Optuna's best trial params). ``None`` uses XGBoost defaults.

    Returns:
        Pipeline: A single-step pipeline wrapping a dart-booster XGBClassifier.
    """
    # Fix: removed the dead `start_end_list` local (its only consumer, the
    # TimeSeriesFeatureExtractor step, is disabled) and collapsed the
    # duplicated if/else constructor branches into one call.
    classifier = XGBClassifier(booster="dart", n_jobs=-1, **(params or {}))

    # NOTE(review): a time-series feature-extraction step used to precede the
    # classifier; it is currently disabled, so the pipeline is classifier-only.
    pipeline = Pipeline([
        ('classifier', classifier)
    ])
    return pipeline
|
71 |
+
|
72 |
+
def objective(trial, X, y):
    """Optuna objective: mean 3-fold F1 score of the pipeline for one trial."""
    # Hyperparameter search space for this trial.
    search_space = {
        'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3, log=True),
        'max_depth': trial.suggest_int('max_depth', 3, 10),
        'n_estimators': trial.suggest_int('n_estimators', 50, 300),
        'subsample': trial.suggest_float('subsample', 0.5, 1.0),
        'colsample_bytree': trial.suggest_float('colsample_bytree', 0.5, 1.0),
        'gamma': trial.suggest_float('gamma', 0, 5),
        'min_child_weight': trial.suggest_int('min_child_weight', 1, 10),
        'reg_alpha': trial.suggest_float('reg_alpha', 0, 10),
        'reg_lambda': trial.suggest_float('reg_lambda', 1, 10)
    }

    # Build a candidate pipeline and score it with 3-fold CV for stability.
    candidate = create_stock_prediction_pipeline(search_space)
    fold_scores = cross_val_score(candidate, X, y, cv=3, scoring='f1')

    # Optuna maximizes this value (see study direction in main).
    return fold_scores.mean()
|
96 |
+
|
97 |
+
def _tune_hyperparameters(X_train, y_train, n_trials=20):
    """Run an Optuna study maximizing CV F1; return the best params dict."""
    print("Starting hyperparameter optimization with Optuna...")
    study = optuna.create_study(direction='maximize')  # We want to maximize the F1 score
    study.optimize(lambda trial: objective(trial, X_train, y_train), n_trials=n_trials)

    print(f"Best F1 score: {study.best_value:.4f}")
    print("Best hyperparameters:", study.best_params)
    return study.best_params


def _report_metrics(y_true, y_pred):
    """Print accuracy / F1 / precision / recall and the confusion matrix."""
    # Imported locally so the metrics dependency stays scoped to evaluation.
    from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix

    print(f"Accuracy: {accuracy_score(y_true, y_pred):.4f}")
    print(f"F1 Score: {f1_score(y_true, y_pred):.4f}")
    print(f"Precision: {precision_score(y_true, y_pred):.4f}")
    print(f"Recall: {recall_score(y_true, y_pred):.4f}")

    print("Confusion Matrix:")
    print(confusion_matrix(y_true, y_pred))


def _predict_and_save(pipeline, feature_columns):
    """Predict on the public test set and write submission.csv."""
    print("Loading test data and making predictions...")
    test = pl.read_csv('./38_Public_Test_Set_and_Submmision_Template/38_Public_Test_Set_and_Submmision_Template/public_x.csv')
    template = pl.read_csv('./38_Public_Test_Set_and_Submmision_Template/38_Public_Test_Set_and_Submmision_Template/submission_template_public.csv')

    # Align the test set to the exact training feature columns (order matters).
    selected = test.select(feature_columns)
    y_pred_test = pipeline.predict(selected)

    # Quick sanity check on the class distribution of the predictions.
    unique, counts = np.unique(y_pred_test, return_counts=True)
    print("Prediction counts:", dict(zip(unique, counts)))

    template = template.with_columns(pl.Series("飆股", y_pred_test))
    template.write_csv("submission.csv")
    print("Predictions saved to submission.csv")


def main():
    """End-to-end workflow: load data, tune, train, evaluate, and predict."""
    print("Loading and preprocessing training data...")
    combined = load_and_balance_data('./cleaned_training.csv', ratio=1/30)

    # Features are everything outside DROP_LIST; "飆股" is the binary target.
    X = combined.drop(DROP_LIST)
    y = combined["飆股"]

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    best_params = _tune_hyperparameters(X_train, y_train, n_trials=20)

    print("Training final model with the best parameters...")
    pipeline = create_stock_prediction_pipeline(best_params)
    pipeline.fit(X_train, y_train)

    print("Evaluating the model...")
    _report_metrics(y_test, pipeline.predict(X_test))

    _predict_and_save(pipeline, X.columns)
|
166 |
+
|
167 |
+
# Script entry point: runs the full tune/train/evaluate/predict workflow.
if __name__ == "__main__":
    main()
|