C# - Image Matting - U2NET - Model Included - Complete and Runnable
Introduction: a complete, runnable C# WinForms example of portrait matting (background removal) using the u2net_human_seg ONNX model with Microsoft.ML.OnnxRuntime and OpenCvSharp.
Environment
.NET Framework 4.6.2, x64, VS2022
Dependencies
OpenCvSharp.dll, OpenCvSharp.Extensions.dll, Microsoft.ML.OnnxRuntime.dll, u2net_human_seg.onnx
Result
The matted output keeps the person and makes the background transparent (shown as before/after screenshots in the original post).
Form layout
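The code only depends on three controls on the form: a button named button1 that opens the file dialog, and two PictureBox controls named pb_init (source image) and pb_rst (matting result). A minimal designer sketch follows; the control names match the code, but the sizes, positions and captions are assumptions, not the original layout.

// Minimal sketch of the designer-generated part of Form1 (assumed layout, arbitrary sizes).
// Only the names button1, pb_init and pb_rst matter to the logic code.
private Button button1;
private PictureBox pb_init;
private PictureBox pb_rst;

private void InitializeComponent()
{
    button1 = new Button { Text = "Select Image", Location = new System.Drawing.Point(12, 12), AutoSize = true };
    button1.Click += button1_Click;
    pb_init = new PictureBox { Location = new System.Drawing.Point(12, 50), Size = new System.Drawing.Size(360, 360), SizeMode = PictureBoxSizeMode.Zoom };
    pb_rst = new PictureBox { Location = new System.Drawing.Point(390, 50), Size = new System.Drawing.Size(360, 360), SizeMode = PictureBoxSizeMode.Zoom };
    ClientSize = new System.Drawing.Size(770, 430);
    Text = "U2NET Matting Demo";
    Controls.AddRange(new Control[] { button1, pb_init, pb_rst });
}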
Full code
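The listing below omits the file header; based on the types it uses, roughly the following using directives are needed (both System.Drawing and OpenCvSharp define a Size type, which is why the code qualifies OpenCvSharp.Size explicitly):

// Using directives assumed by the listing below (not shown in the original post).
using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using System.Windows.Forms;
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;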
public partial class Form1 : Form
{
private string imgFile = "";
private string model_path = string.Empty;
private int modelSize = 0;
private SessionOptions options = null;
private InferenceSession onnx_session = null;
private List<NamedOnnxValue> input_ontainer = null;
private DenseTensor<float> input_tensor = null;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
using (OpenFileDialog ofd = new OpenFileDialog())
{
ofd.Filter = "Image Files|*.jpg;*.png";
if (ofd.ShowDialog() == DialogResult.OK)
{
imgFile = ofd.FileName;
}
}
if (string.IsNullOrEmpty(imgFile))
{
return;
}
do_rec();
}
private async void do_rec()
{
var imgBs = File.ReadAllBytes(imgFile);
var imgInit = Image.FromStream(new MemoryStream(imgBs)) as Bitmap;
pb_init.Image = imgInit;
Application.DoEvents();
if (input_tensor == null)
{
load_removebg();
}
var imgRst = await Task.Run(() =>
removebg(imgBs)
);
pb_rst.Image = imgRst;
Application.DoEvents();
}
private void load_removebg()
{
// The model is expected in a "model" folder next to the executable
this.model_path = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "model", "u2net_human_seg.onnx");
// u2net_human_seg takes a 320x320 input
modelSize = 320;
// Session options: log model loading at INFO level and run on the CPU execution provider
options = new SessionOptions();
options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
options.AppendExecutionProvider_CPU(0);
onnx_session = new InferenceSession(model_path, options);
input_ontainer = new List<NamedOnnxValue>();
input_tensor = new DenseTensor<float>(new[] { 1, 3, modelSize, modelSize });
}
private Bitmap removebg(byte[] imgbs)
{
var image = Cv2.ImDecode(imgbs, ImreadModes.Color);
int oldwidth = image.Cols;
int oldheight = image.Rows;
var lenMin = Math.Min(oldwidth, oldheight);
image = image.Resize(new OpenCvSharp.Size(lenMin, lenMin));
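// Note: forcing the image into a square here distorts the aspect ratio; both the color image
// and the mask are stretched back to the original width/height at the end.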
int maxEdge = Math.Max(image.Rows, image.Cols);
float ratio = 1.0f * modelSize / maxEdge;
int newHeight = (int)(image.Rows * ratio);
int newWidth = (int)(image.Cols * ratio);
Mat resize_image = image.Resize(new OpenCvSharp.Size(newWidth, newHeight));
int width = resize_image.Cols;
int height = resize_image.Rows;
if (width != modelSize || height != modelSize)
{
resize_image = resize_image.CopyMakeBorder(0, modelSize - newHeight, 0, modelSize - newWidth, BorderTypes.Constant, new Scalar(255, 255, 255));
}
Cv2.CvtColor(resize_image, resize_image, ColorConversionCodes.BGR2RGB);
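// Fill the input tensor with the usual U2NET preprocessing: scale each pixel to [0,1], then
// subtract the ImageNet mean (0.485/0.456/0.406) and divide by the std (0.229/0.224/0.225)
// per R/G/B channel. The Mat was converted to RGB above, so Vec3b index 0 = R, 1 = G, 2 = B.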
for (int y = 0; y < resize_image.Height; y++)
{
for (int x = 0; x < resize_image.Width; x++)
{
input_tensor[0, 0, y, x] = (resize_image.At<Vec3b>(y, x)[0] / 255f - 0.485f) / 0.229f;
input_tensor[0, 1, y, x] = (resize_image.At<Vec3b>(y, x)[1] / 255f - 0.456f) / 0.224f;
input_tensor[0, 2, y, x] = (resize_image.At<Vec3b>(y, x)[2] / 255f - 0.406f) / 0.225f;
}
}
string inputName = onnx_session.InputMetadata.Keys.First();
this.input_ontainer.Clear();
input_ontainer.Add(NamedOnnxValue.CreateFromTensor(inputName, input_tensor));
// Run inference and get the outputs
var result_infer = onnx_session.Run(input_ontainer);
// Convert the outputs to a DisposableNamedOnnxValue array
var results_onnxvalue = result_infer.ToArray();
// Read the first output node and convert it to a Tensor
var result_tensors = results_onnxvalue[0].AsTensor<float>();
var result_array = result_tensors.ToArray();
float maxVal = result_array.Max();
float minVal = result_array.Min();
for (int i = 0; i < result_array.Length; i++)
{
result_array[i] = (result_array[i] - minVal) / (maxVal - minVal) * 255;
}
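// The first output of u2net_human_seg is a 320x320 saliency map; after the min-max rescale
// above it is used as the alpha mask (0 = background, 255 = foreground).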
var result_image = new Mat(modelSize, modelSize, MatType.CV_32F, result_array);
// The mask is single-channel, so expand it with GRAY2BGR (RGB2BGR would throw on a 1-channel Mat);
// the merge code below converts it back to grayscale anyway.
Cv2.CvtColor(result_image, result_image, ColorConversionCodes.GRAY2BGR);
#region Image merging and size restoration
var m1 = image;
var m2 = result_image;
m1 = m1.Resize(new OpenCvSharp.Size(oldwidth, oldheight));
m2 = m2.Resize(new OpenCvSharp.Size(oldwidth, oldheight));
using (var result = new Mat())
{
try
{
// First convert m2 (the mask) to a single-channel grayscale image
using (var grayMat = new Mat())
using (var normalizedMat = new Mat())
{
// If m2 has 3 channels, convert it to grayscale first
if (m2.Channels() == 3)
{
Cv2.CvtColor(m2, grayMat, ColorConversionCodes.BGR2GRAY);
}
else
{
m2.CopyTo(grayMat);
}
// Convert from 32F to 8U after normalizing
if (grayMat.Type() == MatType.CV_32F || grayMat.Type() == MatType.CV_32FC3)
{
// Normalize to the 0-255 range
Cv2.Normalize(grayMat, normalizedMat, 0, 255, NormTypes.MinMax);
normalizedMat.ConvertTo(grayMat, MatType.CV_8UC1);
}
// Convert m1 from BGR to RGB
using (var rgbMat = new Mat())
{
Cv2.CvtColor(m1, rgbMat, ColorConversionCodes.BGR2RGB);
// Split the channels of the RGB image
var channels = rgbMat.Split();
// Build a list of 4 channels: color + alpha
var allChannels = new List<Mat>();
// Add the channels in B, G, R, A order; BitmapConverter.ToBitmap treats a 4-channel Mat
// as 32bpp BGRA, so this ordering produces a correct bitmap with transparency
allChannels.Add(channels[2]); // B (channels are in RGB order after the conversion above)
allChannels.Add(channels[1]); // G
allChannels.Add(channels[0]); // R
allChannels.Add(grayMat.Clone()); // Alpha (the mask)
// Merge the four channels into a single BGRA image
Cv2.Merge(allChannels.ToArray(), result);
// Dispose the split channels
foreach (var channel in channels)
{
channel.Dispose();
}
// Dispose the channel list (the cloned alpha channel; disposing the color channels again is a no-op)
foreach (var channel in allChannels)
{
channel.Dispose();
}
// Convert the BGRA Mat to a System.Drawing.Bitmap
return OpenCvSharp.Extensions.BitmapConverter.ToBitmap(result);
}
}
}
catch (Exception ex)
{
MessageBox.Show($"Error while processing the image: {ex.Message}\nDebug info:\n{ex.StackTrace}");
return null;
}
}
#endregion
}
}
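To keep the transparency of the result when saving it to disk, write it out as a PNG. A minimal sketch (saveResult is not part of the original code; wire it to a second button or call it after do_rec):

// Minimal sketch: save the matted result (BGRA bitmap with alpha) as a transparent PNG.
// Assumes pb_rst.Image already holds the output of removebg().
private void saveResult()
{
    if (pb_rst.Image == null) return;
    using (var sfd = new SaveFileDialog { Filter = "PNG|*.png" })
    {
        if (sfd.ShowDialog() == DialogResult.OK)
        {
            pb_rst.Image.Save(sfd.FileName, System.Drawing.Imaging.ImageFormat.Png);
        }
    }
}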
Demo download link: