Initial commit - BraceIQMed platform with frontend, API, and brace generator

2026-01-29 14:34:05 -08:00
commit 745f9f827f
187 changed files with 534688 additions and 0 deletions


View File

@@ -0,0 +1,220 @@
import numpy as np
import math
def _create_angles_dict(pt, mt, tl):
"""
    pt, mt, tl: 2-tuples of (angle, [idxTop, idxBottom])
"""
return {
"pt": {
"angle": pt[0],
"idxs": [pt[1][0], pt[1][1]],
},
"mt": {
"angle": mt[0],
"idxs": [mt[1][0], mt[1][1]],
},
"tl": {
"angle": tl[0],
"idxs": [tl[1][0], tl[1][1]],
}
}
def _isS(p):
    """Return True if the midpoints `p` form an S-shaped curve (they lie on
    both sides of the chord from the first to the last midpoint)."""
    num = len(p)
    ll = np.zeros([num - 2, 1])
    for i in range(num - 2):
        # Signed offset of each midpoint from the first-to-last chord
        ll[i] = (p[i][1] - p[num - 1][1]) / (p[0][1] - p[num - 1][1]) \
            - (p[i][0] - p[num - 1][0]) / (p[0][0] - p[num - 1][0])
    # Offsets change sign iff some pairwise product in ll @ ll.T is negative
    flag = np.sum(np.dot(ll, ll.T)) != np.sum(abs(np.dot(ll, ll.T)))
    return flag
def cobb_angle_cal(landmark_xy, image_shape):
"""
    `landmark_xy`: flat list [x1,x2,...,xn,y1,y2,...,yn] (all x's first, then all y's), where
    - the total length is even (same number of x's and y's).
    - 0 <= x <= WIDTH
    - 0 <= y <= HEIGHT
    `image_shape`: (HEIGHT, WIDTH, CHANNELS) *only HEIGHT is used
Returns: Tuple(4): cobb_angles_list, angles_with_pos, curve_type, midpoint_lines.
- `cobb_angles_list` - For evaluating with ground-truth: ex. [0.50, 0.11, 0.33].
- `angles_with_pos` - dict of "pt", "mt", "tl", each with values for "angle" and "idxs".
- `curve_type` - "S" or "C".
    - `midpoint_lines` - list of midpoint line segments: ex. [[[x,y],[x,y]], [[x,y],[x,y]], ...].
"""
    landmark_xy = list(landmark_xy)     # ensure a plain Python list
    ap_num = int(len(landmark_xy)/2)    # number of landmark points
    vnum = int(ap_num / 4)              # number of vertebrae (4 corner points each)
    first_half = landmark_xy[:ap_num]   # all x coordinates
    second_half = landmark_xy[ap_num:]  # all y coordinates
# Values this function returns
cob_angles = np.zeros(3)
angles_with_pos = {}
curve_type = None
# Midpoints (2 points per vertebra)
mid_p_v = []
for i in range(int(len(landmark_xy)/4)):
x = first_half[2*i: 2*i+2]
y = second_half[2*i: 2*i+2]
row = [(x[0] + x[1]) / 2, (y[0] + y[1]) / 2]
mid_p_v.append(row)
mid_p = []
for i in range(int(vnum)):
x = first_half[4*i: 4*i+4]
y = second_half[4*i: 4*i+4]
point1 = [(x[0] + x[2]) / 2, (y[0] + y[2]) / 2]
point2 = [(x[3] + x[1]) / 2, (y[3] + y[1]) / 2]
mid_p.append(point1)
mid_p.append(point2)
# Line and Slope
vec_m = []
for i in range(int(len(mid_p)/2)):
points = mid_p[2*i: 2*i+2]
row = [points[1][0]-points[0][0], points[1][1]-points[0][1]]
vec_m.append(row)
mod_v = []
for i in vec_m:
row = [i[0]*i[0], i[1]*i[1]]
mod_v.append(row)
    # Pairwise dot products and vector norms for every vertebra midline
    dot_v = np.dot(np.matrix(vec_m), np.matrix(vec_m).T)
    mod_v = np.sqrt(np.sum(np.matrix(mod_v), axis=1))
slopes = []
for i in vec_m:
slope = i[1]/i[0]
slopes.append(slope)
angles = np.clip(dot_v/np.dot(mod_v, mod_v.T), -1, 1)
angles = np.arccos(angles)
maxt = np.amax(angles, axis = 0)
pos1 = np.argmax(angles, axis = 0)
pt, pos2 = np.amax(maxt), np.argmax(maxt)
pt = pt*180/math.pi
cob_angles[0] = pt
    if not _isS(mid_p_v):
        # C-shaped spine: a single curve, so the largest angle is the main one
mod_v1 = np.sqrt(np.sum(np.multiply(np.matrix(vec_m[0]), np.matrix(vec_m[0]))))
mod_vs1 = np.sqrt(np.sum(np.multiply(np.matrix(vec_m[pos2]), np.matrix(vec_m[pos2])), axis=1))
mod_v2 = np.sqrt(np.sum(np.multiply(np.matrix(vec_m[int(vnum-1)]), np.matrix(vec_m[int(vnum-1)])), axis=1))
mod_vs2 = np.sqrt(np.sum(np.multiply(vec_m[pos1.item((0, pos2))], vec_m[pos1.item((0, pos2))])))
dot_v1 = np.dot(np.array(vec_m[0]), np.array(vec_m[pos2]).T)
dot_v2 = np.dot(np.array(vec_m[int(vnum-1)]), np.array(vec_m[pos1.item((0, pos2))]).T)
mt = np.arccos(np.clip(dot_v1/np.dot(mod_v1, mod_vs1.T), -1, 1))
tl = np.arccos(np.clip(dot_v2/np.dot(mod_v2, mod_vs2.T), -1, 1))
mt = mt*180/math.pi
tl = tl*180/math.pi
cob_angles[1] = mt
cob_angles[2] = tl
# DETECTION CASE 1: Spine Type C
        angles_with_pos = _create_angles_dict(
            mt=(float(pt), [pos2, pos1.A1.tolist()[pos2]]),
            pt=(float(mt), [0, int(pos2)]),
            tl=(float(tl), [pos1.A1.tolist()[pos2], vnum-1]),
        )
curve_type = "C"
    else:
        # S-shaped spine: pick the measurement scheme based on whether the main
        # curve sits in the upper half of the image (the sum of the two
        # apex-midpoint y's is less than the image height).
        if (mid_p_v[pos2*2][1] + mid_p_v[pos1.item((0, pos2))*2][1]) < image_shape[0]:
#Calculate Upside Cobb Angle
mod_v_p = np.sqrt(np.sum(np.multiply(vec_m[pos2], vec_m[pos2])))
mod_v1 = np.sqrt(np.sum(np.multiply(vec_m[0:pos2], vec_m[0:pos2]), axis=1))
dot_v1 = np.dot(np.array(vec_m[pos2]), np.array(vec_m[0:pos2]).T)
angles1 = np.arccos(np.clip(dot_v1/np.dot(mod_v_p, mod_v1.T), -1, 1))
CobbAn1, pos1_1 = np.amax(angles1, axis = 0), np.argmax(angles1, axis = 0)
mt = CobbAn1*180/math.pi
cob_angles[1] = mt
#Calculate Downside Cobb Angle
mod_v_p2 = np.sqrt(np.sum(np.multiply(vec_m[pos1.item((0, pos2))], vec_m[pos1.item((0, pos2))])))
mod_v2 = np.sqrt(np.sum(np.multiply(vec_m[pos1.item((0, pos2)):int(vnum)], vec_m[pos1.item((0, pos2)):int(vnum)]), axis=1))
dot_v2 = np.dot(np.array(vec_m[pos1.item((0, pos2))]), np.array(vec_m[pos1.item((0, pos2)):int(vnum)]).T)
angles2 = np.arccos(np.clip(dot_v2/np.dot(mod_v_p2, mod_v2.T), -1, 1))
CobbAn2, pos1_2 = np.amax(angles2, axis = 0), np.argmax(angles2, axis = 0)
tl = CobbAn2*180/math.pi
cob_angles[2] = tl
pos1_2 = pos1_2 + pos1.item((0, pos2)) - 1
# DETECTION CASE 2: Spine Type S, Up and Bottom
# print("case 2")
            angles_with_pos = _create_angles_dict(
                mt=(float(pt), [pos2, pos1.A1.tolist()[pos2]]),
                pt=(float(mt), [int(pos1_1), int(pos2)]),
                tl=(float(tl), [pos1.A1.tolist()[pos2], int(pos1_2)]),
            )
curve_type = "S"
else:
#Calculate Upside Cobb Angle
mod_v_p = np.sqrt(np.sum(np.multiply(vec_m[pos2], vec_m[pos2])))
mod_v1 = np.sqrt(np.sum(np.multiply(vec_m[0:pos2], vec_m[0:pos2]), axis=1))
dot_v1 = np.dot(np.array(vec_m[pos2]), np.array(vec_m[0:pos2]).T)
angles1 = np.arccos(np.clip(dot_v1/np.dot(mod_v_p, mod_v1.T), -1, 1))
CobbAn1 = np.amax(angles1, axis = 0)
pos1_1 = np.argmax(angles1, axis = 0)
mt = CobbAn1*180/math.pi
cob_angles[1] = mt
#Calculate Upper Upside Cobb Angle
mod_v_p2 = np.sqrt(np.sum(np.multiply(vec_m[pos1_1], vec_m[pos1_1])))
mod_v2 = np.sqrt(np.sum(np.multiply(vec_m[0:pos1_1+1], vec_m[0:pos1_1+1]), axis=1))
dot_v2 = np.dot(np.array(vec_m[pos1_1]), np.array(vec_m[0:pos1_1+1]).T)
angles2 = np.arccos(np.clip(dot_v2/np.dot(mod_v_p2, mod_v2.T), -1, 1))
CobbAn2, pos1_2 = np.amax(angles2, axis = 0), np.argmax(angles2, axis = 0)
tl = CobbAn2*180/math.pi
cob_angles[2] = tl
# pos1_2 = pos1_2 + pos1.item((0, pos2)) - 1
            # DETECTION CASE 3: Spine Type S, largest curve near the bottom (both secondary curves above it)
# print("case 3")
            angles_with_pos = _create_angles_dict(
                tl=(float(pt), [pos2, pos1.A1.tolist()[pos2]]),
                mt=(float(mt), [pos1_1, pos2]),
                pt=(float(tl), [int(pos1_2), int(pos1_1)]),
            )
curve_type = "S"
midpoint_lines = []
for i in range(0,int(len(mid_p)/2)):
midpoint_lines.append([list(map(int, mid_p[i*2])), list(map(int, mid_p[i*2+1]))])
    # Convert numpy values to plain Python types (e.g. for JSON serialization)
cobb_angles_list = [float(c) for c in cob_angles]
for key in angles_with_pos.keys():
angles_with_pos[key]['angle'] = float(angles_with_pos[key]['angle'])
for i in range(len(angles_with_pos[key]['idxs'])):
angles_with_pos[key]['idxs'][i] = int(angles_with_pos[key]['idxs'][i])
return cobb_angles_list, angles_with_pos, curve_type, midpoint_lines
def keypoints_to_landmark_xy(keypoints):
    """
    Converts keypoints from the model,
        [
            [[x,y],[x,y],[x,y],[x,y]],  # 4 corner points of one vertebra
            ...
        ]
    to the flat landmark format
        [x1,x2,x3,...,xn,y1,y2,y3,...,yn]
    """
x_points = []
for kps in keypoints:
for kp in kps:
x_points.append(kp[0])
y_points = []
for kps in keypoints:
for kp in kps:
y_points.append(kp[1])
landmark_xy = x_points + y_points
return landmark_xy
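
# --- Illustrative usage sketch (not part of the committed module) ------------
# A minimal, made-up example of the expected data flow: 4-corner keypoints for
# two vertebrae are flattened into the landmark format and fed to
# `cobb_angle_cal`. The coordinates and image shape below are assumptions.
if __name__ == "__main__":
    example_keypoints = [
        [[100, 50], [200, 50], [100, 90], [200, 90]],      # corners of vertebra 1
        [[105, 120], [205, 120], [105, 160], [205, 160]],  # corners of vertebra 2
    ]
    example_landmark_xy = keypoints_to_landmark_xy(example_keypoints)
    # image_shape is (HEIGHT, WIDTH, CHANNELS); only HEIGHT is used
    cobb_angles, angles_with_pos, curve_type, midpoint_lines = cobb_angle_cal(
        example_landmark_xy, (512, 256, 3)
    )
    print(curve_type, cobb_angles)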

View File

@@ -0,0 +1,76 @@
import os
from pathlib import Path
# Keypoint RCNN Model
import torch
from torchvision.models.detection.rpn import AnchorGenerator
import torchvision
def _download_kprcnn_model():
    """Download the Keypoint R-CNN weights from the 'models' Deta Drive."""
    print("DETA: Downloading Keypoint RCNN Model...")
from deta import Deta
deta = Deta(os.environ.get("DETA_ID"))
models = deta.Drive("models")
model_file = models.get('keypointsrcnn_weights.pt')
with open("models/keypointsrcnn_weights.pt", "wb+") as f:
for chunk in model_file.iter_chunks(1024):
f.write(chunk)
print("DETA: Keypoint RCNN model downloaded.")
model_file.close()
def get_kprcnn_model():
    """Build the Keypoint R-CNN (ResNet-50 FPN) model and load the fine-tuned weights."""
    model_folder = Path("models")
if not model_folder.exists():
os.mkdir("models")
model_path = Path("models/keypointsrcnn_weights.pt")
# Download if the model does not exist
if model_path.is_file():
print("Keypoint RCNN Model is already downloaded.")
else:
print("Keypoint RCNN Model was NOT FOUND.")
_download_kprcnn_model()
num_keypoints = 4
    anchor_generator = AnchorGenerator(
        sizes=(32, 64, 128, 256, 512),
        aspect_ratios=(0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0),
    )
    model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
        pretrained=False,
        pretrained_backbone=True,
        num_keypoints=num_keypoints,
        num_classes=2,  # Background is the first class, object is the second class
        rpn_anchor_generator=anchor_generator,
    )
    if model_path.is_file():
        # Load the fine-tuned weights on CPU
        state_dict = torch.load(model_path, map_location=torch.device('cpu'))
        model.load_state_dict(state_dict)
return model
# YoloV5 Model
# def _download_detection_model():
# print("DETA: Downloading Object Detection Model...")
# from deta import Deta
# deta = Deta(os.environ.get("DETA_ID"))
# models = deta.Drive("models")
# model_file = models.get('detection_model.pt')
# with open("models/detection_model.pt", "wb+") as f:
# for chunk in model_file.iter_chunks(1024):
# f.write(chunk)
# print("DETA: Object Detection model downloaded.")
# model_file.close()
# def get_detection_model():
# model_folder = Path("models")
# if not model_folder.exists():
# os.mkdir("models")
# model_path = Path("models/detection_model.pt")
# # Download if the model does not exist
# if model_path.is_file():
# print("Detection Model is already downloaded.")
# else:
# print("Detection Model was NOT FOUND.")
# _download_detection_model()
# # Get model from path and return
# model = torch.hub.load('./yolov5', 'custom', path='./models/detection_model.pt', source='local')
# return model

View File

@@ -0,0 +1,158 @@
import torch
import torchvision
from torchvision.transforms import functional as F
import numpy as np
from scoliovis.get_model import get_kprcnn_model
# Trigger the weight download at import time (the model object itself is not cached)
get_kprcnn_model()
def _filter_output(output):
# 1. Get Scores
scores = output['scores'].detach().cpu().numpy()
# 2. Get Indices of Scores over Threshold
high_scores_idxs = np.where(scores > 0.5)[0].tolist() # Indexes of boxes with scores > 0.5
    # 3. Get Indices after Non-max Suppression (iou_threshold=0.3)
    post_nms_idxs = torchvision.ops.nms(
        output['boxes'][high_scores_idxs], output['scores'][high_scores_idxs], 0.3
    ).cpu().numpy()
# 4. Get final `bboxes` and `keypoints` and `scores` based on indices
np_keypoints = output['keypoints'][high_scores_idxs][post_nms_idxs].detach().cpu().numpy()
np_bboxes = output['boxes'][high_scores_idxs][post_nms_idxs].detach().cpu().numpy()
np_scores = output['scores'][high_scores_idxs][post_nms_idxs].detach().cpu().numpy()
    # 5. Keep the top-scoring detections (at most 18)
    sorted_scores_idxs = np.argsort(-1 * np_scores)  # sort indices by descending score
    np_scores = np_scores[sorted_scores_idxs][:18]
    np_keypoints = np.array([np_keypoints[idx] for idx in sorted_scores_idxs])[:18]
    np_bboxes = np.array([np_bboxes[idx] for idx in sorted_scores_idxs])[:18]
    # 6. Sort detections top-to-bottom by the y-coordinate of each vertebra's
    #    first keypoint (kps[0] is the first corner point, kps[0][1] is its y value)
ymins = np.array([kps[0][1] for kps in np_keypoints])
sorted_ymin_idxs = np.argsort(ymins) # ascending
np_scores = np.array([np_scores[idx] for idx in sorted_ymin_idxs])
np_keypoints = np.array([np_keypoints[idx] for idx in sorted_ymin_idxs])
np_bboxes = np.array([np_bboxes[idx] for idx in sorted_ymin_idxs])
# 7. Convert everything to List Instead of Numpy
keypoints_list = []
for kps in np_keypoints:
keypoints_list.append([list(map(float, kp[:2])) for kp in kps])
bboxes_list = []
for bbox in np_bboxes:
bboxes_list.append(list(map(int, bbox.tolist())))
scores_list = np_scores.tolist()
return bboxes_list, keypoints_list, scores_list
def predict(images):
    """
    images: a single image as a PIL Image or numpy array (H x W x C);
            it is converted internally to a [C, H, W] tensor with values in 0-1.
    returns: a list of (bboxes, keypoints, scores) tuples, one per image,
             each holding the highest-scoring vertebra detections (see _filter_output).
    """
device = torch.device('cpu')
model = get_kprcnn_model()
model.to(device)
model.eval()
    # 1. Wrap the single input image as a one-element batch of tensors
    images_input = [F.to_tensor(images)]
    images_input = [image.to(device) for image in images_input]
# 2. Inference
with torch.no_grad():
outputs = model(images_input) # 3. get output
filtered_outputs = [_filter_output(output) for output in outputs]
return filtered_outputs
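
# --- Illustrative helper sketch (not part of the committed module) -----------
# A hypothetical convenience wrapper showing how `predict` is meant to be fed:
# one RGB image loaded from disk and passed in as a numpy array. The function
# name and the use of Pillow here are assumptions, not part of the API.
def _example_predict_from_path(image_path: str):
    from PIL import Image
    image = np.array(Image.open(image_path).convert("RGB"))  # H x W x C, uint8
    bboxes, keypoints, scores = predict(image)[0]
    return bboxes, keypoints, scores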
from scoliovis.cobb_angle_cal import cobb_angle_cal, keypoints_to_landmark_xy
def kprcnn_to_scoliovis_api_format(bboxes, keypoints, scores, image_shape):
"""
@params
- `bboxes, keypoints, scores` - outputs from the model
- `image_shape` - (HEIGHT, WIDTH, CHANNELS)
@returns {
`detections`: {
`class`: number,
`confidence`: number,
`name`: "vert",
`xmax`: number,
`xmin`: number,
`ymin`: number,
`ymax`: number
},
`normalized_detections`: **REMOVED**,
`landmarks`: [x,y,x,y,x,y,x,y,x,y,x,y],
`angles`: {
`pt`: {
`angle`: number,
`idxs`: [number, number]
},
`mt`: {
`angle`: number,
`idxs`: [number, number]
},
`tl`: {
`angle`: number,
`idxs`: [number, number]
}
},
`midpoint_lines`: [
[[x,y],[x,y]],
[[x,y],[x,y]],
[[x,y],[x,y]]
],
`curve_type`: "S" | "C"
}
"""
detections = []
for idx, bbox in enumerate(bboxes):
detections.append({
"class": 0,
"confidence": scores[idx],
"name": "vert",
"xmin": bbox[0],
"ymin": bbox[1],
"xmax": bbox[2],
"ymax": bbox[3],
})
landmarks = []
for kps in keypoints:
for kp in kps:
landmarks.append(kp[0])
landmarks.append(kp[1])
    try:
        _, angles, curve_type, midpoint_lines = cobb_angle_cal(
            keypoints_to_landmark_xy(keypoints), image_shape
        )
    except Exception:
        curve_type = None
        angles = None
        midpoint_lines = None
        print("Could not calculate Cobb Angle for this Image")
return {
"detections": detections,
"landmarks": landmarks,
"angles": angles,
"curve_type": curve_type,
"midpoint_lines": midpoint_lines,
}
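
# --- Illustrative usage sketch (not part of the committed module) ------------
# A minimal, hedged sketch of the full pipeline for one image: run inference,
# then package the detections, landmarks, and Cobb angles into the API format.
# "spine_xray.jpg" is a hypothetical path; Pillow is assumed for image loading.
if __name__ == "__main__":
    from PIL import Image
    image = np.array(Image.open("spine_xray.jpg").convert("RGB"))  # H x W x C
    bboxes, keypoints, scores = predict(image)[0]
    response = kprcnn_to_scoliovis_api_format(bboxes, keypoints, scores, image.shape)
    print("curve type:", response["curve_type"])
    for name, info in (response["angles"] or {}).items():
        print(f"{name}: {info['angle']:.1f} deg between vertebra midlines {info['idxs']}")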