# -*- coding: utf-8 -*-
"""app.py: I-beam stress and deflection calculator with an LLM-generated explanation.

Originally generated by Colab; source notebook:
https://colab.research.google.com/drive/11Qva5ddomzIbz6DxYcTc0-amujuGqXAM
"""
import math
import gradio as gr
import pandas as pd
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# Load the small instruct LLM once at import time and wrap it in a text-generation pipeline
MODEL_ID = "HuggingFaceTB/SmolLM2-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline(
    task="text-generation",
    model=AutoModelForCausalLM.from_pretrained(MODEL_ID),
    tokenizer=tokenizer,
)
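
# Optional smoke test (an assumption, not part of the original app): uncomment to
# confirm the model generates text before wiring up the UI.
# print(pipe("Say hello.", max_new_tokens=8, return_full_text=False)[0]["generated_text"])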
# Beam calculation: simply supported I-beam with a central point load
def ibeam_calc(L_m, bf_m, tf_m, tw_m, h_m, P_kN, E_GPa=200, Sy_MPa=250):
    # Convert inputs to SI base units
    E = E_GPa * 1e9
    Sy = Sy_MPa * 1e6
    P = P_kN * 1e3
    # Second moment of area: enclosing rectangle minus the two side cut-outs
    I = (bf_m * h_m**3) / 12 - ((bf_m - tw_m) * (h_m - 2 * tf_m)**3) / 12
    # Elastic section modulus about the strong axis
    S = I / (h_m / 2)
    # Maximum bending moment and stress at midspan (M = PL/4)
    M = P * L_m / 4
    sigma = M / S
    sigma_MPa = sigma / 1e6
    # Midspan deflection of a simply supported beam under a central load
    delta = (P * L_m**3) / (48 * E * I)
    delta_mm = delta * 1e3
    # Serviceability limit (span/360) and factors of safety
    delta_allow = L_m / 360
    fos_yield = Sy / sigma if sigma > 0 else math.inf
    fos_defl = delta_allow / delta if delta > 0 else math.inf
    return dict(
        results={
            "σ_max [MPa]": sigma_MPa,
            "FoS_yield": fos_yield,
            "δ [mm]": delta_mm,
            "FoS_defl": fos_defl,
        },
        verdict={
            "passes_yield": sigma <= Sy,
            "passes_serviceability": delta <= delta_allow,
        },
    )
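
# Worked example (values follow from the formulas above, rounded):
#   ibeam_calc(6.0, 0.2, 0.02, 0.01, 0.3, 50.0)
#   I ≈ 1.717e-4 m^4, S ≈ 1.145e-3 m^3, M = 75 kN·m,
#   σ_max ≈ 65.5 MPa (FoS_yield ≈ 3.82),
#   δ ≈ 6.55 mm vs. δ_allow ≈ 16.67 mm (FoS_defl ≈ 2.54),
#   so both checks pass for the default inputs.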
# LLM explanation: turn the numeric results into a one-sentence narrative
def llm_explain(results, inputs):
    L, bf, tf, tw, h, P = inputs
    r = results["results"]
    v = results["verdict"]
    system_prompt = "You are a structural engineer. Explain results concisely in one professional sentence."
    user_prompt = (
        f"I-beam length {L} m, load {P} kN, bf={bf} m, tf={tf} m, tw={tw} m, h={h} m.\n"
        f"σ_max={r['σ_max [MPa]']:.2f} MPa, FoS_yield={r['FoS_yield']:.2f}, "
        f"δ={r['δ [mm]']:.2f} mm, FoS_defl={r['FoS_defl']:.2f}. "
        f"Yield check={'OK' if v['passes_yield'] else 'FAIL'}, "
        f"Deflection check={'OK' if v['passes_serviceability'] else 'FAIL'}.\n"
        "Explain verdict."
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # do_sample=True is required for temperature to take effect; without it,
    # generation is greedy and the temperature argument is ignored.
    out = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.5, return_full_text=False)
    return out[0]["generated_text"]
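
# Note (an assumption about the installed transformers version): recent releases
# let the text-generation pipeline take the chat messages directly, e.g.
# pipe(messages, max_new_tokens=128), applying the chat template internally;
# the explicit apply_chat_template call above keeps the prompt visible.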
# Run once
def run_once(L, bf, tf, tw, h, P):
    d = ibeam_calc(L, bf, tf, tw, h, P)
    df = pd.DataFrame([d["results"]])
    narrative = llm_explain(d, [L, bf, tf, tw, h, P])
    return df, narrative
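
# Headless usage sketch for quick testing without the UI (assumption: run from
# a Python shell after the model has loaded):
# df, text = run_once(6.0, 0.2, 0.02, 0.01, 0.3, 50.0)
# print(df.to_string(index=False))
# print(text)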
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# I-beam Stress & Deflection Calculator")
    with gr.Row():
        L = gr.Number(value=6.0, label="Beam length L [m]")
        P = gr.Number(value=50.0, label="Central load P [kN]")
    with gr.Row():
        bf = gr.Number(value=0.2, label="Flange width bf [m]")
        tf = gr.Number(value=0.02, label="Flange thickness tf [m]")
        tw = gr.Number(value=0.01, label="Web thickness tw [m]")
        h = gr.Number(value=0.3, label="Beam height h [m]")
    run_btn = gr.Button("Compute")
    results_df = gr.Dataframe(label="Results", interactive=False)
    explain_md = gr.Markdown(label="Explanation")
    run_btn.click(fn=run_once, inputs=[L, bf, tf, tw, h, P], outputs=[results_df, explain_md])
    gr.Examples(
        examples=[
            [6.0, 0.2, 0.02, 0.01, 0.3, 50.0],
            [4.0, 0.15, 0.015, 0.008, 0.25, 30.0],
            [8.0, 0.25, 0.02, 0.012, 0.35, 80.0],
        ],
        inputs=[L, bf, tf, tw, h, P],
    )
if __name__ == "__main__":
    demo.launch(debug=True)
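
# On Hugging Face Spaces the launch() above is enough; when running locally,
# demo.launch(share=True) additionally serves a temporary public URL.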