#include <opencv2/opencv.hpp> #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp"
using namespace cv; using namespace std;
Mat image = imread("Guitar.jpg");
if (image.empty()) ...
void show_image(Mat image) { namedWindow("window"); // Create a window
imshow(windowName, image); // Show our image inside the created window.
waitKey(0); // Wait for any keystroke in the window
destroyWindow(windowName);
}
Mat::Mat(int rows, int cols, int type, const Scalar& s)
CV_8UC1 CV_8UC3 CV_8SC1 CV_16UC1 CV_16SC1 CV_32SC1 CV_32FC1 CV_64FC1
void Mat::convertTo(OutputArray m, int rtype, double alpha=1, double beta=0 ) use -1 for the same type. alpha and beta are scale and offset
cvtColor(image, image, COLOR_BGR2GRAY); equalizeHist(image, dst); blur(image, dst, Size(3, 3)); GaussianBlur(image, dst, Size(5, 5), 0);
bitwise_not(img, img); // (cvNot is the removed legacy C API; bitwise_not is the C++ equivalent) erode(image, dst, getStructuringElement(MORPH_RECT, Size(5, 5))); dilate(...); open(src,element)=dilate(erode(src,element)); close(src,element)=erode(dilate(src,element));
threshold( src, dst, threshold_value, max_binary_value, threshold_type );
double compactness = kmeans(points, clusterCount, labels, TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0), 3, KMEANS_PP_CENTERS, centers);
using namespace cv::ximgproc;
int length_threshold = 10; float distance_threshold = 1.41421356f; double canny_th1 = 50.0; double canny_th2 = 50.0; int canny_aperture_size = 3; bool do_merge = false; Ptr<FastLineDetector> fld = createFastLineDetector(length_threshold, distance_threshold, canny_th1, canny_th2, canny_aperture_size, do_merge); vector<Vec4f> lines;
Mat bw = threshval < 128 ? (img < threshval) : (img > threshval); Mat labelImage(img.size(), CV_32S); int nLabels = connectedComponents(bw, labelImage, 8); std::vector<Vec3b> colors(nLabels); colors[0] = Vec3b(0, 0, 0);//background for(int label = 1; label < nLabels; ++label){ colors[label] = Vec3b( (rand()&255), (rand()&255), (rand()&255) ); } Mat dst(img.size(), CV_8UC3); for(int r = 0; r < dst.rows; ++r){ for(int c = 0; c < dst.cols; ++c){ int label = labelImage.at<int>(r, c); Vec3b &pixel = dst.at<Vec3b>(r, c); pixel = colors[label]; } }
Canny(blurImage, edge1, edgeThresh, edgeThresh*3, 3); Scharr(blurImage,dx,CV_16S,1,0); Scharr(blurImage,dy,CV_16S,0,1); Sobel( src, dx, CV_16SC1, 1, 0, 3 ); Sobel( src, dy, CV_16SC1, 0, 1, 3 );
Canny( dx,dy, edge2, edgeThreshScharr, edgeThreshScharr*3 );
int i = 3; bilateralFilter ( src, dst, i, i*2, i/2 );
filter2D(src, dst, ddepth, kernel, anchor = Point(-1,-1), delta = offset, int borderType = BORDER_DEFAULT )
pyrDown( src, src, Size( src.cols/2, src.rows/2 ) ); pyrUp( src, src, Size( src.cols*2, src.rows*2 ) ); // pyrUp doubles the size; passing the halved size is a pyrDown copy-paste error
void cv::matchTemplate ( InputArray image, InputArray templ, OutputArray result, int method, InputArray mask = noArray() ) After the function finishes the comparison, the best matches can be found as global minimums (when TM_SQDIFF was used) or maximums (when TM_CCORR or TM_CCOEFF was used) using the minMaxLoc function. SQDIFF is fastest. TM_CCOEFF good to match bright to bright and dark to dark. TM_CCORR to match bright to bright.