ethanrom committed
Commit 9ec9085
1 Parent(s): 4d09f78

Upload 17 files

Files changed (7)
  1. app.py +86 -5
  2. button_click_alt.py +66 -1
  3. converted.png +0 -0
  4. model.h5 +2 -2
  5. model_1.h5 +3 -0
  6. process.py +0 -5
  7. re.csv +79 -0
app.py CHANGED
@@ -1,6 +1,6 @@
 import streamlit as st
 import tensorflow as tf
-from button_click_alt import find_order_id
+from button_click_alt import find_order_id_2
 from button_click_alt import find_order_id_similarity
 from flann import generate_images, flann_matching, flann_matching_3, flann_matching_alt, flann_matching_4
 import cv2
@@ -8,6 +8,7 @@ import numpy as np
 import pandas as pd
 import plotly.express as px
 import os
+import altair as alt

 FONTS_FOLDER = "fonts"
 NUM_IMAGES_PER_FONT = 5
@@ -16,7 +17,7 @@ def main():
     st.set_page_config(page_title='Order ID Finder', layout='wide')
     st.title('OCR + Font type demo')

-    tabs = st.tabs(["Intro", "Find Order", "Try FLANN Matching"])
+    tabs = st.tabs(["Intro", "Find Order", "Try FLANN Matching", "Results"])

     with tabs[0]:
         col1, col2 = st.columns([1, 2])
@@ -54,14 +55,35 @@
         It calculates the threshold that minimizes the intra-class variance or maximizes the inter-class variance.
         By choosing the threshold that maximizes the inter-class variance, Otsu's thresholding effectively separates the two classes, resulting in a binary image.</p> """, unsafe_allow_html=True)

-        colab_link = '[<img src="https://colab.research.google.com/assets/colab-badge.svg">](https://colab.research.google.com/drive/1tq35g7ym1c73uDlAcy2KChIXsqlNY-RL?usp=sharing)'
+        st.subheader("FLANN Matching")
+        st.markdown("""<p>FLANN (Fast Library for Approximate Nearest Neighbors) is a popular library for performing fast and efficient nearest neighbor searches in high-dimensional spaces.
+        It is often used in computer vision tasks such as feature matching, where the goal is to find corresponding features between two images.
+        In feature matching, one commonly used algorithm is SIFT (Scale-Invariant Feature Transform), which extracts keypoint descriptors from an image.
+        However, because SIFT produces a large number of keypoints, it can be computationally expensive to match them between images.
+        To address this issue, the Lowe ratio test is often used in conjunction with SIFT and FLANN.
+        The ratio test involves comparing the distance between the two closest matches of a given keypoint descriptor.
+        If the ratio of these distances is below a certain threshold (typically 0.7), then the match is considered to be valid.
+        This helps to filter out false matches and improve the accuracy of the feature matching process.</p>
+        <p> The ORB descriptor provides information about the local orientation and intensity of image features, which can be used to identify reliable matching points between two images.
+        By using RANSAC to estimate the homography between the matched keypoints, we can eliminate outliers and improve the accuracy of the registration process.</P>
+        """, unsafe_allow_html=True)
+
+        colab_link = '[<img src="https://colab.research.google.com/assets/colab-badge.svg">](https://colab.research.google.com/drive/11aO-QNRl2qMK0tgJ03RvcRLUuUSPvWMc?usp=sharing)'
         st.markdown(colab_link, unsafe_allow_html=True)



     with tabs[1]:
         st.write('## Find Order')
-        st.write('This is the find order tab')
+        st.markdown("""<p>The pretrained model has been trained with 21,000 synthetic generated images. The goal is to detect the font type from a given list of 21 font types.
+        The model comprises several layers, starting with a Conv2D layer with a filter size of 32 and activation function of ReLU.
+        Input shape is specified here. Batch normalization is added to standardize output values.
+        This layer is followed by another Conv2D layer with the same filter size, activation function, and batch normalization.
+        MaxPooling2D is used to reduce the dimensionality of the data, with a pool size of (2,2). Dropout is then applied, with a rate of 0.25.
+        This process is repeated with two more sets of Conv2D, BatchNormalization, MaxPooling2D and Dropout layers, increasing the filter size to 64 and 128 for each set respectively.
+        Finally, the model is flattened to transform the multidimensional output into a one-dimensional array, and a Dense layer with 512 nodes is added with an activation function of ReLU.
+        Batch normalization and dropout are also included in this layer. Finally, a last dense layer with a softmax activation function is added, which outputs the predicted class probabilities.</p>
+        """, unsafe_allow_html=True)

         with st.sidebar:
             st.write('## Input')
@@ -80,7 +102,7 @@
         if st.button('Find Order ID by OCR + font type') and uploaded_file and input_file:
             st.write('## Output')
             model = tf.keras.models.load_model('model.h5')
-            result = find_order_id(uploaded_file, input_file, model, ocre)
+            result = find_order_id_2(uploaded_file, input_file, model, ocre)
             if result['status'] == 'success':
                 st.success(result['message'])
             elif result['status'] == 'warning':
@@ -96,6 +118,10 @@

     with tabs[2]:
         st.write('## Try FLANN Matching')
+        st.markdown("""<p>Multiple images are generated for a given text, or detected text, with slight variations for each font type. Specifically, five images are created for each font, across all 21 font types.
+        To detect features, SIFT descriptors are utilized and matched using flann method.
+        Depending on the selected options, Lowe's ratio test, KNN matching, or ORB descriptor is then employed.
+        Average matching percentages are calculated for each font type, and the font type with the highest percentage is returned as the most likely one.</p>""", unsafe_allow_html=True)
         text_input = st.text_input("Enter your text:")
         upload_image = st.file_uploader("Choose an image:", type=["jpg", "jpeg", "png"])
         col1, col2, col3 = st.columns(3)
@@ -148,7 +174,62 @@
             st.plotly_chart(fig)
             max_match_font = avg_df['Match Percent'].idxmax()
             st.success(f"The most likely font type is: {max_match_font}")
+
+    with tabs[3]:
+        st.title('Results')
+        df = pd.read_csv('re.csv')
+        st.dataframe(df)
+
+        def calculate_accuracy(df, method):
+            correct = df['correct font']
+            predicted = df[method]
+            accuracy = np.mean(correct == predicted)
+            return round(accuracy, 3)
+
+        col1, col2 = st.columns(2)
+        with col1:
+            data = pd.DataFrame({
+                'Method': ['CNN + OTSU', 'CNN Median blur + Adaptive thresh', 'FLANN + SIFT + LOWE'],
+                'Accuracy': [calculate_accuracy(df, 'CNN + OTSU'),
+                             calculate_accuracy(df, 'CNN Median blur + Adaptive thresh'),
+                             calculate_accuracy(df, 'FLANN + SIFT + LOWE')]
+            })
+
+            bar_chart = alt.Chart(data).mark_bar().encode(
+                x='Method',
+                y='Accuracy',
+                color=alt.condition(
+                    alt.datum.Accuracy >= 0.8,
+                    alt.value('green'),
+                    alt.value('red')
+                )
+            ).properties(title='Accuracy by Method')
+
+            st.altair_chart(bar_chart)
+
+        with col2:
+            font_counts = df.groupby(['correct font']).size().reset_index(name='counts')
+            font_counts_chart = alt.Chart(font_counts).mark_bar().encode(
+                x=alt.X('correct font', sort='-y'),
+                y='counts'
+            ).properties(title='Number of Correct Predictions by Font')

+            st.altair_chart(font_counts_chart)
+
+        # create a stacked bar chart showing the distribution of predicted fonts for each image
+        melted = df.melt(id_vars=['Images', 'correct font'], var_name='Method', value_name='Predicted Font')
+        melted_counts = melted.groupby(['Images', 'correct font', 'Predicted Font']).size().reset_index(name='counts')
+        stacked_bar_chart = alt.Chart(melted_counts).mark_bar().encode(
+            x=alt.X('Images', sort='-y'),
+            y='counts',
+            color='Predicted Font',
+            order=alt.Order(
+                'Predicted Font',
+                sort='ascending'
+            )
+        ).properties(title='Distribution of Predicted Fonts by Image')

+        st.altair_chart(stacked_bar_chart)

 if __name__ == '__main__':
     main()
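The Intro and FLANN tabs added above describe the matching pipeline in prose: SIFT keypoints, a FLANN index, k-nearest-neighbour matching, and Lowe's ratio test with a 0.7 threshold. The repo's own helpers (flann_matching, flann_matching_3, etc.) are not part of this diff, so the following is only a minimal OpenCV sketch of that pipeline; the function name match_percent and the percentage formula are illustrative, not taken from flann.py.

import cv2

# Minimal sketch of the SIFT + FLANN + Lowe-ratio pipeline described in the tabs
# above (requires OpenCV >= 4.4 for SIFT_create). Not the repo's implementation.
def match_percent(path_a, path_b, ratio=0.7):
    img_a = cv2.imread(path_a, cv2.IMREAD_GRAYSCALE)
    img_b = cv2.imread(path_b, cv2.IMREAD_GRAYSCALE)

    sift = cv2.SIFT_create()
    kp_a, des_a = sift.detectAndCompute(img_a, None)
    kp_b, des_b = sift.detectAndCompute(img_b, None)
    if des_a is None or des_b is None:
        return 0.0

    # FLANN with a KD-tree index, the usual choice for float SIFT descriptors
    index_params = dict(algorithm=1, trees=5)   # 1 == FLANN_INDEX_KDTREE
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)

    # k=2 so Lowe's ratio test can compare the best match against the second best
    matches = flann.knnMatch(des_a, des_b, k=2)
    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance]

    # Share of keypoints that survive the ratio test, as a percentage
    return 100.0 * len(good) / max(len(matches), 1)

Per the tab text, app.py applies this kind of comparison between the input image and the five images generated per font, averages the percentages per font, and reports the font with the highest average.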
button_click_alt.py CHANGED
@@ -133,4 +133,69 @@ def find_order_id_similarity(uploaded_file, input_file, similarity_method, ocre)
     else:
         message = f'Detected Text: {text.strip()}\n, Could not find the Order ID.'
         result = {'status': 'error', 'message': message}
-    return result
+    return result
+
+def find_order_id_2(uploaded_file, input_file, model, ocre):
+    if ocre == 'Hive':
+        uploaded_image = Image.open(uploaded_file)
+        text = infer_text(uploaded_image)
+    else:
+        rotated = preprocess_image(uploaded_file)
+        text = pytesseract.image_to_string(rotated)
+
+    with input_file as file:
+        file_contents = file.read().decode()
+        lines = file_contents.split('\n')
+        found = False
+        possible_order_ids = []
+        for line in lines:
+            order_id, name, font = line.strip().split(',')
+            if name.strip() in text:
+                image = load_img(uploaded_file, target_size=(64, 64))
+                image = img_to_array(image)
+                image = np.expand_dims(image, axis=0)
+                image = image / 255.0
+                prediction = model.predict(image)
+
+                class_names = ['Allibretto1.8.otf', 'Bella1.1.otf', 'Buffalo Nickel1.2.otf', 'Cervanttis1.18.otf', 'Claster1.6.otf', 'Fairy4.5.otf', 'Mon-Amour-April1.7.otf', 'Mon-Amour-Aug1.1.otf', 'Mon-Amour-Dec1.2.otf', 'Mon-Amour-Feb1.1.otf', 'Mon-Amour-January1.2.otf', 'Mon-Amour-July1.1.otf', 'Mon-Amour-June1.1.otf', 'Mon-Amour-Mar1.2.otf', 'Mon-Amour-May1.1.otf', 'Mon-Amour-Nov1.1.otf', 'Mon-Amour-Oct1.1.otf', 'Mon-Amour-Sept1.1.otf', 'Mon-Amour2.3.otf', 'Shelby1.3.otf', 'UKIJJ-Quill1.7.otf']
+                predicted_class_index = np.argmax(prediction[0])
+                predicted_class_name = class_names[predicted_class_index]
+
+                if predicted_class_name.strip() == font.strip():
+                    result = {
+                        'status': 'success',
+                        'message': f'Detected Text: {text.strip()}\n, Order ID: {order_id}, Predicted Font Type: {predicted_class_name.strip()}'
+                    }
+                    found = True
+                    break
+                else:
+                    possible_order_ids.append(order_id)
+
+        if not found:
+            image = load_img(uploaded_file, target_size=(64, 64))
+            image = img_to_array(image)
+            image = np.expand_dims(image, axis=0)
+            image = image / 255.0
+            prediction = model.predict(image)
+
+            class_names = ['Allibretto1.8.otf', 'Bella1.1.otf', 'Buffalo Nickel1.2.otf', 'Cervanttis1.18.otf', 'Claster1.6.otf', 'Fairy4.5.otf', 'Mon-Amour-April1.7.otf', 'Mon-Amour-Aug1.1.otf', 'Mon-Amour-Dec1.2.otf', 'Mon-Amour-Feb1.1.otf', 'Mon-Amour-January1.2.otf', 'Mon-Amour-July1.1.otf', 'Mon-Amour-June1.1.otf', 'Mon-Amour-Mar1.2.otf', 'Mon-Amour-May1.1.otf', 'Mon-Amour-Nov1.1.otf', 'Mon-Amour-Oct1.1.otf', 'Mon-Amour-Sept1.1.otf', 'Mon-Amour2.3.otf', 'Shelby1.3.otf', 'UKIJJ-Quill1.7.otf']
+            predicted_class_index = np.argmax(prediction[0])
+            predicted_class_name = class_names[predicted_class_index]
+
+            for line in lines:
+                order_id, name, font = line.strip().split(',')
+                if font.strip() == predicted_class_name.strip():
+                    possible_order_ids.append(order_id)
+
+            if len(possible_order_ids) > 0:
+                result = {
+                    'status': 'warning',
+                    'message': f'Detected Text: {text.strip()}\n, Possible Order IDs: {",".join(possible_order_ids)}, Predicted Font Type: {predicted_class_name.strip()}'
+                }
+            else:
+                result = {
+                    'status': 'error',
+                    'message': f'Detected Text: {text.strip()}\n, Could not find the Order ID and possible font matches.'
+                }
+
+    return result
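find_order_id_2 above expects the uploaded order list to contain one order_id,name,font triple per line, and first looks for a customer name contained in the OCR text. A tiny standalone sketch of that lookup step; the order IDs, names, and detected text below are invented for illustration, not taken from the repo.

# Hypothetical order list in the "order_id,name,font" format the function parses.
sample_lines = [
    "1001,Emma,Fairy4.5.otf",
    "1002,Olivia,Claster1.6.otf",
    "1003,Ava,Allibretto1.8.otf",
]

detected_text = "Olivia"  # stand-in for the OCR output used by find_order_id_2

candidates = []
for line in sample_lines:
    order_id, name, font = line.strip().split(',')
    # Same containment check the function applies before running the font classifier
    if name.strip() in detected_text:
        candidates.append((order_id, font))

print(candidates)  # [('1002', 'Claster1.6.otf')]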
converted.png ADDED
model.h5 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:148ca2bd9a8292808dd8ab5e1cfde8164fc92c3a8658ea1007cd2f0b4caacd14
-size 8253400
+oid sha256:638f6f58906d91e654ba974d39dfefd04d724cc5f544dc594d629d212f88be79
+size 16325952
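The Find Order tab text in app.py walks through the CNN stored in model.h5 layer by layer: paired 32-filter Conv2D and BatchNormalization layers, MaxPooling2D with a (2,2) pool, Dropout at 0.25, two further blocks at 64 and 128 filters, then Flatten, a 512-unit Dense layer with batch normalization and dropout, and a softmax output. The weights are only visible here as an LFS pointer, so the following Keras sketch just follows that description; the (3,3) kernel size, a single conv layer in the later blocks, the 0.5 dense-layer dropout, and the 64x64x3 input (the size find_order_id_2 resizes to) are assumptions.

from tensorflow.keras import layers, models

NUM_CLASSES = 21           # one class per font in the class_names list
INPUT_SHAPE = (64, 64, 3)  # matches load_img(..., target_size=(64, 64))

def build_font_cnn():
    model = models.Sequential()

    # Block 1: two 32-filter Conv2D layers, each followed by batch normalization
    model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
    model.add(layers.BatchNormalization())
    model.add(layers.Conv2D(32, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.25))

    # Blocks 2 and 3: the same pattern with 64 and 128 filters (assumed one conv
    # layer per block; the tab text does not say how many)
    for filters in (64, 128):
        model.add(layers.Conv2D(filters, (3, 3), activation='relu'))
        model.add(layers.BatchNormalization())
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Dropout(0.25))

    # Classifier head: Flatten -> Dense(512) with BN and dropout -> softmax
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))  # dense-layer dropout rate not stated; assumed
    model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
    return model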
model_1.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:148ca2bd9a8292808dd8ab5e1cfde8164fc92c3a8658ea1007cd2f0b4caacd14
+size 8253400
process.py CHANGED
@@ -11,16 +11,11 @@ def preprocess_image(image_file):
     img = cv2.imdecode(np.fromstring(image_file.read(), np.uint8), 1)
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

-    # Thresholding
     _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
-
-    # Image Denoising
     blur = cv2.GaussianBlur(thresh, (3,3), 0)

-    # Image Binarization
     thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 4)

-    # Skew Correction
     coords = np.column_stack(np.where(thresh > 0))
     angle = cv2.minAreaRect(coords)[-1]
     if angle < -45:
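The hunk above ends right where the skew-correction block begins (the minAreaRect angle and the angle < -45 branch). For context, this is the standard OpenCV deskew recipe; the sketch below shows how such a block is conventionally completed, not a copy of the rest of the repo's preprocess_image. Note also that np.fromstring, used above, is deprecated; np.frombuffer is the usual replacement.

import cv2
import numpy as np

def deskew(thresh, img):
    # Angle of the minimum-area rectangle enclosing all foreground pixels
    coords = np.column_stack(np.where(thresh > 0))
    angle = cv2.minAreaRect(coords)[-1]
    if angle < -45:
        angle = -(90 + angle)
    else:
        angle = -angle

    # Rotate the image about its centre by the recovered angle
    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(img, M, (w, h),
                          flags=cv2.INTER_CUBIC,
                          borderMode=cv2.BORDER_REPLICATE)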
re.csv ADDED
@@ -0,0 +1,79 @@
+Images,correct font,CNN + OTSU,CNN Median blur + Adaptive thresh,FLANN + SIFT + LOWE
+image01,Claster1.6.otf,Claster1.6.otf,Claster1.6.otf,Allibretto1.8.otf
+image02,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image03,Buffalo Nickel1.2.otf,Cervanttis1.18.otf,Cervanttis1.18.otf,Buffalo Nickel1.2.otf
+image04,Fairy4.5.otf,Shelby1.3.otf,Mon-Amour2.3.otf,Shelby1.3.otf
+image05,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf,Fairy4.5.otf
+image06,UKIJJ-Quill1.7.otf,Claster1.6.otf,Claster1.6.otf,UKIJJ-Quill1.7.otf
+image07,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image08,Cervanttis1.18.otf,Claster1.6.otf,Claster1.6.otf,Cervanttis1.18.otf
+image09,Buffalo Nickel1.2.otf,Mon-Amour-Dec1.2.otf,Mon-Amour-Dec1.2.otf,Buffalo Nickel1.2.otf
+image10,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image11,Mon Amour2.3.otf,Claster1.6.otf,Fairy4.5.otf,Mon-Amour-January1.2.otf
+image12,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image13,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image14,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Mon-Amour-Sept1.1.otf
+image15,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image16,Claster1.6.otf,Fairy4.5.otf,Claster1.6.otf,Mon-Amour-April1.7.otf
+image17,Fairy4.5.otf,Claster1.6.otf,Mon-Amour2.3.otf,Fairy4.5.otf
+image18,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image19,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Mon-Amour-January1.2.otf
+image20,Cervanttis1.18.otf,Claster1.6.otf,Claster1.6.otf,Mon-Amour-Aug1.1.otf
+image21,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf,Fairy4.5.otf
+image22,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image23,Cervanttis1.18.otf,Claster1.6.otf,Fairy4.5.otf,Cervanttis1.18.otf
+image24,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image25,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image26,Fairy4.5.otf,Fairy4.5.otf,Mon-Amour2.3.otf,Mon-Amour-May1.1.otf
+image27,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image28,Fairy4.5.otf,Fairy4.5.otf,Mon-Amour2.3.otf,Fairy4.5.otf
+image29,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image30,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image31,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image32,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image33,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf,Fairy4.5.otf
+image34,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf,Fairy4.5.otf
+image35,Fairy4.5.otf,Shelby1.3.otf,Claster1.6.otf,Allibretto1.8.otf
+image36,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image37,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image38,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image39,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image40,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image41,Fairy4.5.otf,Claster1.6.otf,Mon-Amour-Dec1.2.otf,Fairy4.5.otf
+image42,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image43,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image44,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image45,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image46,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image47,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image48,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image49,Claster1.6.otf,Fairy4.5.otf,Claster1.6.otf,UKIJJ-Quill1.7.otf
+image50,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image51,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image52,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Allibretto1.8.otf
+image53,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Shelby1.3.otf
+image54,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf,Fairy4.5.otf
+image55,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image56,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf,Fairy4.5.otf
+image57,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image58,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image59,Cervanttis1.18.otf,Shelby1.3.otf,Fairy4.5.otf,Cervanttis1.18.otf
+image60,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image61,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Bella1.1.otf
+image62,Allibretto1.8.otf,Claster1.6.otf,Claster1.6.otf,Allibretto1.8.otf
+image63,Cervanttis1.18.otf,Fairy4.5.otf,Claster1.6.otf,Mon-Amour-Mar1.2.otf
+image64,Claster1.6.otf,Claster1.6.otf,Claster1.6.otf,Shelby1.3.otf
+image65,Claster1.6.otf,Claster1.6.otf,Mon-Amour-Dec1.2.otf,Claster1.6.otf
+image66,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Mon-Amour-Aug1.1.otf
+image67,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Cervanttis1.18.otf
+image68,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Mon-Amour-June1.1.otf
+image69,UKIJJ-Quill1.7.otf,Fairy4.5.otf,Mon-Amour2.3.otf,UKIJJ-Quill1.7.otf
+image70,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Claster1.6.otf
+image71,Cervanttis1.18.otf,Claster1.6.otf,Claster1.6.otf,Cervanttis1.18.otf
+image72,Fairy4.5.otf,Fairy4.5.otf,Claster1.6.otf,Fairy4.5.otf
+image73,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf,Fairy4.5.otf
+image74,Allibretto1.8.otf,Mon-Amour-April1.7.otf,Mon-Amour-Sept1.1.otf,Allibretto1.8.otf
+image75,Allibretto1.8.otf,Mon-Amour-January1.2.otf,Mon-Amour-January1.2.otf,Allibretto1.8.otf
+image76,Allibretto1.8.otf,Mon-Amour-January1.2.otf,Fairy4.5.otf,Allibretto1.8.otf
+image77,Allibretto1.8.otf,Mon-Amour-Sept1.1.otf,Mon-Amour-Sept1.1.otf,Allibretto1.8.otf
+image78,Mon-Amour-April1.7.otf,Claster1.6.otf,Claster1.6.otf,Mon-Amour-April1.7.otf
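re.csv is the data behind the new Results tab: one row per test image, with the ground-truth font and the font predicted by each of the three methods. The per-method accuracies charted in app.py can be reproduced from this file with a few lines of pandas, mirroring the calculate_accuracy helper above:

import pandas as pd

df = pd.read_csv('re.csv')
methods = ['CNN + OTSU', 'CNN Median blur + Adaptive thresh', 'FLANN + SIFT + LOWE']

for method in methods:
    # Fraction of rows where the method's prediction equals the labelled font
    accuracy = (df[method] == df['correct font']).mean()
    print(f'{method}: {accuracy:.3f}')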