Mirror of https://github.com/opencv/opencv_contrib.git (synced 2025-10-20 04:25:42 +08:00)
text: fix valgrind errors, minor changes
@@ -5,7 +5,6 @@ import os
 
 import cv2
 import numpy as np
-from matplotlib import pyplot as plt
 
 print('\ndetect_er_chars.py')
 print(' A simple demo script using the Extremal Region Filter algorithm described in:')
@@ -32,8 +31,8 @@ regions = cv2.text.detectRegions(gray,er1,er2)
 #Visualization
 rects = [cv2.boundingRect(p.reshape(-1, 1, 2)) for p in regions]
 for rect in rects:
-  cv2.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 255), 2)
-img = img[:,:,::-1] #flip the colors dimension from BGR to RGB
-plt.imshow(img)
-plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
-plt.show()
+  cv2.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
+for rect in rects:
+  cv2.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
+cv2.imshow("Text detection result", img)
+cv2.waitKey(0)
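For context, the rewritten visualization drops the matplotlib dependency and draws each detected box twice, a thick black outline overdrawn by a thin white one, before showing the result with cv2.imshow. The sketch below is a minimal, self-contained illustration of that drawing style; the gray canvas and the (x, y, w, h) boxes are made-up stand-ins for the cv2.text detections, so it runs without the trained NM classifier files.

import cv2
import numpy as np

# Hypothetical stand-ins: a plain canvas and two hand-picked boxes instead of
# real cv2.text.detectRegions output.
img = np.full((200, 400, 3), 128, dtype=np.uint8)
rects = [(30, 40, 120, 60), (200, 90, 150, 70)]  # (x, y, w, h)

for x, y, w, h in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 2)        # thick black outline
for x, y, w, h in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 255), 1)  # thin white outline on top

cv2.imshow("Text detection result", img)
cv2.waitKey(0)

Drawing black-then-white outlines keeps the boxes visible on both light and dark backgrounds, which is why a single colored rectangle pass was replaced by two passes.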
@@ -275,24 +275,18 @@ void ERFilterNM::er_tree_extract( InputArray image )
     // the component stack
     vector<ERStat*> er_stack;
 
-    //the quads for euler number calculation
-    unsigned char quads[3][4];
-    quads[0][0] = 1 << 3;
-    quads[0][1] = 1 << 2;
-    quads[0][2] = 1 << 1;
-    quads[0][3] = 1;
-    quads[1][0] = (1<<2)|(1<<1)|(1);
-    quads[1][1] = (1<<3)|(1<<1)|(1);
-    quads[1][2] = (1<<3)|(1<<2)|(1);
-    quads[1][3] = (1<<3)|(1<<2)|(1<<1);
-    quads[2][0] = (1<<2)|(1<<1);
-    quads[2][1] = (1<<3)|(1);
-    // quads[2][2] and quads[2][3] are never used so no need to initialize them.
+    // the quads for euler number calculation
+    // quads[2][2] and quads[2][3] are never used.
+    // The four lowest bits in each quads[i][j] correspond to the 2x2 binary patterns
+    // Q_1, Q_2, Q_3 in the Neumann and Matas CVPR 2012 paper
+    // (see in page 4 at the end of first column).
+    // Q_1 and Q_2 have four patterns, while Q_3 has only two.
+    const int quads[3][4] =
+    {
+        { 1<<3, 1<<2, 1<<1, 1<<0 },
+        { (1<<2)|(1<<1)|(1), (1<<3)|(1<<1)|(1), (1<<3)|(1<<2)|(1), (1<<3)|(1<<2)|(1<<1) },
+        { (1<<2)|(1<<1), (1<<3)|(1), /*unused*/-1, /*unused*/-1 }
+    };
 
     // masks to know if a pixel is accessible and if it has been already added to some region
     vector<bool> accessible_pixel_mask(width * height);
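The new comment block ties the quads table to the 2x2 binary patterns Q_1, Q_2, Q_3 from Neumann and Matas (CVPR 2012), which drive an incremental Euler number (connected components minus holes) computation; the replacement table also gives the two unused entries explicit /*unused*/ -1 values instead of leaving them uninitialized. As a rough standalone sketch of the underlying bit-quad idea, the snippet below does the batch computation in Python/NumPy rather than the incremental bookkeeping er_tree_extract performs, and it assumes one particular bit layout for the 2x2 window (the diff documents the patterns, not the bit order): with C1 windows containing exactly one foreground pixel, C3 containing three, and Cd the two diagonal patterns, the 4-connectivity Euler number is (C1 - C3 + 2*Cd) / 4.

import numpy as np

def euler_number_4conn(binary):
    """Euler number (components minus holes) of a 0/1 image via 2x2 quad counts."""
    b = np.pad(np.asarray(binary, dtype=np.uint8), 1)  # background border
    # Encode every 2x2 window as a 4-bit pattern: bit3 bit2 / bit1 bit0 (assumed layout).
    win = (b[:-1, :-1] << 3) | (b[:-1, 1:] << 2) | (b[1:, :-1] << 1) | b[1:, 1:]
    c1 = np.isin(win, [0b1000, 0b0100, 0b0010, 0b0001]).sum()  # exactly one pixel set
    c3 = np.isin(win, [0b0111, 0b1011, 0b1101, 0b1110]).sum()  # exactly three pixels set
    cd = np.isin(win, [0b0110, 0b1001]).sum()                  # the two diagonal patterns
    return (int(c1) - int(c3) + 2 * int(cd)) // 4

ring = np.array([[1, 1, 1],
                 [1, 0, 1],
                 [1, 1, 1]])
print(euler_number_4conn(ring))  # one component with one hole -> 0

The C++ code instead tracks how these pattern counts change as each pixel joins a region, which is what the quad_before/quad_after buffers in the next hunk appear to support.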
@@ -392,8 +386,8 @@ void ERFilterNM::er_tree_extract( InputArray image )
         int non_boundary_neighbours = 0;
         int non_boundary_neighbours_horiz = 0;
 
-        unsigned char quad_before[4] = {0,0,0,0};
-        unsigned char quad_after[4] = {0,0,0,0};
+        int quad_before[4] = {0,0,0,0};
+        int quad_after[4] = {0,0,0,0};
         quad_after[0] = 1<<1;
         quad_after[1] = 1<<3;
         quad_after[2] = 1<<2;
@@ -542,9 +536,9 @@ void ERFilterNM::er_tree_extract( InputArray image )
                 current_edge = boundary_edges[threshold_level].back();
                 boundary_edges[threshold_level].erase(boundary_edges[threshold_level].end()-1);
 
-                while (boundary_pixes[threshold_level].empty() && (threshold_level < (255/thresholdDelta)+1))
-                    threshold_level++;
-
+                for (; threshold_level < (255/thresholdDelta)+1; threshold_level++)
+                    if (!boundary_pixes[threshold_level].empty())
+                        break;
 
                 int new_level = image_data[current_pixel];
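This last hunk looks most directly related to the valgrind part of the commit message: in the removed while loop, the subscript boundary_pixes[threshold_level].empty() is evaluated before the range check on threshold_level, so once every remaining bucket is empty the condition can read one slot past the last valid level; the replacement for loop tests the bound first and only then looks inside the bucket. A small Python illustration of the same "check the bound before indexing" pattern (generic helper names, not from the OpenCV sources):

def first_nonempty_level(buckets, level):
    """Advance `level` to the first non-empty bucket, testing the bound
    *before* indexing, as the rewritten C++ loop does."""
    while level < len(buckets) and not buckets[level]:
        level += 1
    return level

# With every remaining bucket empty, the index stops at len(buckets)
# instead of being used for one extra out-of-range lookup.
print(first_nonempty_level([[], [], []], 0))  # 3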