Upload webapp.py
webapp.py
CHANGED
@@ -9,25 +9,23 @@ import request_json.sbt_request_generator as sbt
 import check_hkid_validity as chv
 import av
 from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, WebRtcMode
+import search_engine as se
+import get_bank_statement as bs
 
-def init():
-face_locations = []
-# face_encodings = []
-face_names = []
-process_this_frame = True
+# def init():
+# face_locations = []
+# # face_encodings = []
+# face_names = []
+# process_this_frame = True
 
-score = []
+# score = []
 
-faces = 0
+# faces = 0
 
-def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
-image = frame.to_ndarray(format="bgr24")
+# def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
+# image = frame.to_ndarray(format="bgr24")
 
 
-
-
-return av.VideoFrame.from_ndarray(image, format="bgr24")
-
 def main():
 
 # st.title("SBT Web Application")
@@ -91,7 +89,9 @@ def main():
 data = sc.get_data(image1, image2)
 # se.get_data_link(data['chi_name_id'], data["name_on_id"], data["address"])
 if 'data' in st.session_state:
+data["nationality"] = 'N/A' # for hkid
 st.session_state['data'] = data
+st.session_state['verified'] = "True"
 st.success('Done!')
 score = int(st.session_state['data']['similarity_score'])
 st.text(f'score: {score}')
@@ -101,13 +101,15 @@ def main():
 st.text(f'unmatched')
 
 data = st.session_state['data']
-st.header("
+st.header("Ia. HKID Data Extraction")
 st.text(f'English Name: {data["name_on_id"]}') # name is without space
 st.text(f'Chinese Name: {data["chi_name_id"]}') # name is without space
 st.text(f'HKID: {data["hkid"]} and validity: {data["validity"]}')
 st.text(f'Date of issue: {data["issue_date"]}')
+st.text(f'Date of birth: {data["dateofbirth"]}')
+st.text(f'nationality: {data["nationality"]}')
 
-st.header("
+st.header("Ib. Bank Statement Data Extraction")
 st.text(f'Name: {data["nameStatement"]}')
 st.text(f'Address: {data["address"]}')
 st.text(f'Bank: {data["bank"]}')
@@ -117,11 +119,11 @@ def main():
 
 if 'data' in st.session_state:
 tempout = st.session_state['data']
-print(f'
+print(f'data: {tempout}')
 
 
-st.header("II. Facial Recognition")
-run = st.checkbox('Run')
+# st.header("II. Facial Recognition")
+# run = st.checkbox('Run')
 
 # webrtc_streamer(key="example")
 # 1. Web Rtc
@@ -129,60 +131,61 @@ def main():
 
 
 # # init the camera
-face_locations = []
+# face_locations = []
 # face_encodings = []
-face_names = []
-process_this_frame = True
+# face_names = []
+# process_this_frame = True
 
-score = []
+# score = []
 
-faces = 0
+# faces = 0
 
-FRAME_WINDOW = st.image([])
+# FRAME_WINDOW = st.image([])
 
 
 # server_ip = "127.0.0.1"
 # server_port = 6666
 
-# camera = cv2.VideoCapture(
+# camera = cv2.VideoCapture(0)
 # s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
 # s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1000000)
 
 # if "face_rec" not in st.session_state:
 # st.session_state.face_rec = []
 
-
+# while run:
 
-rtc_configuration = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
+# rtc_configuration = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
 
-# Capture frame-by-frame
-# Grab a single frame of video
+# # Capture frame-by-frame
+# # Grab a single frame of video
 # ret, frame = camera.read()
-
-#
-
-
-
-
-
-
-
-
+
+# result = frame
+# # Initialize the WebRTC streaming
+# webrtc_ctx = webrtc_streamer(
+# key="face_rec",
+# mode=WebRtcMode.SENDRECV,
+# rtc_configuration=rtc_configuration,
+# # video_transformer_factory=WebcamTransformer,
+# video_frame_callback=video_frame_callback,
+# media_stream_constraints={"video": True, "audio": False},
+# async_processing=True,
+# )
 
 # print(f'xd: look here {type(webrtc_ctx)}')
 
 # st.session_state.face_rec = webrtc_ctx
 
-if webrtc_ctx.video_transformer:
-
-
-
-
+# if webrtc_ctx.video_transformer:
+# st.header("Webcam Preview")
+# frame = webrtc_ctx.video_transformer.frame
+# result, process_this_frame, face_locations, faces, face_names, score = demo.process_frame(frame, process_this_frame, face_locations, faces, face_names, score)
+# st.video(result)
 
 # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 
-# FRAME_WINDOW.image(
+# FRAME_WINDOW.image(result)
 
 # if ret is not None:
 # ret, buffer = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY),30])
@@ -212,7 +215,17 @@ def main():
 
 
 ## unrelated
-
+
+st.header("III. Search Engine and Bank Statement")
+user_input_id = st.text_input("Enter the user ID here", " ")
+if st.button("Search data"):
+with st.spinner('Searching data...'):
+se.get_data_link(user_input_id)
+st.success('Done!')
+if st.button("Fetch bank statement"):
+with st.spinner('getting statements...'):
+bs.get_bs(user_input_id)
+st.success('Done!')
 if st.button("Confirm"):
 st.experimental_set_query_params(
 verified=True,
@@ -221,7 +234,7 @@ def main():
 print(st.session_state['data'])
 sbt.split_data(st.session_state['data'])
 st.success('Done!')
-
+
 if __name__ == '__main__':
 main()
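For reference, the facial-recognition path that this commit comments out follows streamlit-webrtc's frame-callback pattern. Below is a minimal, self-contained sketch of that pattern only; the horizontal flip stands in for the app's own demo.process_frame step, which is not reproduced here.

# Sketch only: streamlit-webrtc callback pattern, not this app's implementation.
import av
import cv2
import streamlit as st
from streamlit_webrtc import RTCConfiguration, WebRtcMode, webrtc_streamer

def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
    image = frame.to_ndarray(format="bgr24")  # decode incoming frame to a numpy array
    image = cv2.flip(image, 1)                # placeholder for per-frame face processing
    return av.VideoFrame.from_ndarray(image, format="bgr24")

st.header("II. Facial Recognition")
webrtc_streamer(
    key="face_rec",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration=RTCConfiguration(
        {"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]}
    ),
    video_frame_callback=video_frame_callback,
    media_stream_constraints={"video": True, "audio": False},
    async_processing=True,
)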
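The Confirm button publishes a verified flag through st.experimental_set_query_params. A hypothetical consumer of that flag, not part of this commit, could read it back with the matching getter, roughly:

# Sketch only: reading back the query parameter set by the Confirm button.
import streamlit as st

params = st.experimental_get_query_params()            # e.g. {"verified": ["True"]}
verified = params.get("verified", ["False"])[0] == "True"
st.write("Identity verified" if verified else "Verification pending")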