2016-12-03 9 views
1

DemoImagetoText의 빌드 지침을 따라 Youtube DemoImagetoText를 성공적으로 빌드했습니다. 이제 이 응용 프로그램을 다중 언어 OCR로 확장하고 싶습니다. 여러 언어로 OCR을 하려면 이 코드에서 무엇을 변경해야 합니까? 현재 다국어를 처리하는 방법을 모르겠습니다.

/**
 * Demo OCR activity: lets the user pick/capture an image, crop it,
 * convert it to grayscale and run Tesseract OCR on it.
 *
 * <p>To OCR several languages at once, set {@link #lang} to a
 * '+'-separated list (e.g. "eng+jpn") and bundle a matching
 * "&lt;code&gt;.traineddata" file in the app assets for EVERY listed
 * language; each file is copied to DATA_PATH/tessdata/ on startup.
 */
public class main extends Activity {

    private CropImageView mCropImageView;
    Bitmap converted;
    EditText textView;
    private TessOCR mTessOCR;
    private Uri mCropImageUri;

    /**
     * Tesseract language code(s). Multiple languages are separated by '+'
     * (e.g. "eng+jpn"); each part needs its own traineddata asset.
     */
    public static final String lang = "eng";

    /** Root directory on external storage holding the tessdata folder. */
    public static final String DATA_PATH = Environment.getExternalStorageDirectory().toString() + "/DemoOCR/";

    private ProgressDialog mProgressDialog;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.a_main);
        textView = (EditText) findViewById(R.id.editText);
        mCropImageView = (CropImageView) findViewById(R.id.CropImageView);

        createDataDirectories();

        // Copy traineddata for EVERY requested language. With lang = "eng+jpn"
        // both eng.traineddata and jpn.traineddata must end up in tessdata/,
        // otherwise Tesseract keeps recognizing only the language it can find.
        for (String language : lang.split("\\+")) {
            copyTrainedDataIfMissing(language);
        }

        mTessOCR = new TessOCR();
    }

    /** Creates DATA_PATH and DATA_PATH/tessdata/ on the sdcard if absent. */
    private void createDataDirectories() {
        String[] paths = new String[]{DATA_PATH, DATA_PATH + "tessdata/"};
        for (String path : paths) {
            File dir = new File(path);
            if (!dir.exists()) {
                if (!dir.mkdirs()) {
                    Log.v("Main", "ERROR: Creation of directory " + path + " on sdcard failed");
                    // If the parent could not be created, the tessdata
                    // subdirectory cannot be created either — stop trying.
                    break;
                } else {
                    Log.v("Main", "Created directory " + path + " on sdcard");
                }
            }
        }
    }

    /**
     * Copies "&lt;language&gt;.traineddata" from the app assets into
     * DATA_PATH/tessdata/ unless it is already present.
     *
     * @param language a single Tesseract language code, e.g. "eng" or "jpn"
     */
    private void copyTrainedDataIfMissing(String language) {
        File target = new File(DATA_PATH + "tessdata/" + language + ".traineddata");
        if (target.exists()) {
            return;
        }
        AssetManager assetManager = getAssets();
        // try-with-resources closes both streams even when the copy fails.
        try (InputStream in = assetManager.open(language + ".traineddata");
             OutputStream out = new FileOutputStream(target)) {
            byte[] buf = new byte[8192];
            int len;
            while ((len = in.read(buf)) > 0) {
                out.write(buf, 0, len);
            }
            Log.v("Main", "Copied " + language + " traineddata");
        } catch (IOException e) {
            // Do NOT swallow this: a silently-failed copy (missing asset) is
            // the usual reason OCR keeps falling back to the wrong language.
            Log.e("Main", "Unable to copy " + language + " traineddata", e);
        }
    }

    /**
     * On load image button click, start pick image chooser activity.
     */
    public void onLoadImageClick(View view) {
        startActivityForResult(getPickImageChooserIntent(), 200);
    }

    /**
     * Crop the image, show the cropped result and run OCR on a
     * black-and-white copy of it.
     */
    public void onCropImageClick(View view) {
        Bitmap cropped = mCropImageView.getCroppedImage(500, 500);
        if (cropped == null) {
            // Nothing to OCR; the previous code dereferenced null here.
            return;
        }
        mCropImageView.setImageBitmap(cropped);
        doOCR(convertColorIntoBlackAndWhiteImage(cropped));
    }

    /**
     * Runs OCR on a worker thread, then publishes the lower-cased result
     * into the EditText on the UI thread. A progress dialog is shown while
     * recognition runs.
     *
     * @param bitmap the (already pre-processed) image to recognize
     */
    public void doOCR(final Bitmap bitmap) {
        if (mProgressDialog == null) {
            mProgressDialog = ProgressDialog.show(this, "Processing",
                    "Please wait...", true);
        } else {
            mProgressDialog.show();
        }

        new Thread(new Runnable() {
            public void run() {
                String raw = mTessOCR.getOCRResult(bitmap);
                // Guard against a null OCR result before lowercasing
                // (previous code could throw NPE here).
                final String result = (raw == null) ? "" : raw.toLowerCase();

                runOnUiThread(new Runnable() {
                    @Override
                    public void run() {
                        if (!result.isEmpty()) {
                            textView.setText(result);
                        }
                        mProgressDialog.dismiss();
                    }
                });
            }
        }).start();
    }

    /**
     * Returns a desaturated (grayscale) ARGB_8888 copy of the given bitmap.
     * The input bitmap is not modified.
     */
    private Bitmap convertColorIntoBlackAndWhiteImage(Bitmap orginalBitmap) {
        ColorMatrix colorMatrix = new ColorMatrix();
        colorMatrix.setSaturation(0);

        ColorMatrixColorFilter colorMatrixFilter = new ColorMatrixColorFilter(
                colorMatrix);

        Bitmap blackAndWhiteBitmap = orginalBitmap.copy(
                Bitmap.Config.ARGB_8888, true);

        Paint paint = new Paint();
        paint.setColorFilter(colorMatrixFilter);

        // Draw the copy onto itself through the desaturating filter.
        Canvas canvas = new Canvas(blackAndWhiteBitmap);
        canvas.drawBitmap(blackAndWhiteBitmap, 0, 0, paint);

        return blackAndWhiteBitmap;
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (resultCode == Activity.RESULT_OK) {
            Uri imageUri = getPickImageResultUri(data);

            // For API >= 23 we need to check specifically that we have permissions to read external storage,
            // but we don't know if we need to for the URI so the simplest is to try open the stream and see if we get error.
            boolean requirePermissions = false;
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M &&
                    checkSelfPermission(Manifest.permission.READ_EXTERNAL_STORAGE) != PackageManager.PERMISSION_GRANTED &&
                    isUriRequiresPermissions(imageUri)) {

                // request permissions and handle the result in onRequestPermissionsResult()
                requirePermissions = true;
                mCropImageUri = imageUri;
                requestPermissions(new String[]{Manifest.permission.READ_EXTERNAL_STORAGE}, 0);
            }

            if (!requirePermissions) {
                mCropImageView.setImageUriAsync(imageUri);
            }
        }
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, String permissions[], int[] grantResults) {
        if (mCropImageUri != null && grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
            mCropImageView.setImageUriAsync(mCropImageUri);
        } else {
            Toast.makeText(this, "Required permissions are not granted", Toast.LENGTH_LONG).show();
        }
    }

    /**
     * Create a chooser intent to select the source to get image from.<br/>
     * The source can be camera's (ACTION_IMAGE_CAPTURE) or gallery's (ACTION_GET_CONTENT).<br/>
     * All possible sources are added to the intent chooser.
     */
    public Intent getPickImageChooserIntent() {

        // Determine Uri of camera image to save.
        Uri outputFileUri = getCaptureImageOutputUri();

        List<Intent> allIntents = new ArrayList<>();
        PackageManager packageManager = getPackageManager();

        // collect all camera intents
        Intent captureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
        List<ResolveInfo> listCam = packageManager.queryIntentActivities(captureIntent, 0);
        for (ResolveInfo res : listCam) {
            Intent intent = new Intent(captureIntent);
            intent.setComponent(new ComponentName(res.activityInfo.packageName, res.activityInfo.name));
            intent.setPackage(res.activityInfo.packageName);
            if (outputFileUri != null) {
                intent.putExtra(MediaStore.EXTRA_OUTPUT, outputFileUri);
            }
            allIntents.add(intent);
        }

        // collect all gallery intents
        Intent galleryIntent = new Intent(Intent.ACTION_GET_CONTENT);
        galleryIntent.setType("image/*");
        List<ResolveInfo> listGallery = packageManager.queryIntentActivities(galleryIntent, 0);
        for (ResolveInfo res : listGallery) {
            Intent intent = new Intent(galleryIntent);
            intent.setComponent(new ComponentName(res.activityInfo.packageName, res.activityInfo.name));
            intent.setPackage(res.activityInfo.packageName);
            allIntents.add(intent);
        }

        // Guard against a device with no camera and no gallery apps at all;
        // previous code threw IndexOutOfBoundsException on the empty list.
        if (allIntents.isEmpty()) {
            return Intent.createChooser(galleryIntent, "Select source");
        }

        // The documents UI (when present) is preferred as the main chooser
        // target; otherwise fall back to the last collected intent.
        Intent mainIntent = allIntents.get(allIntents.size() - 1);
        for (Intent intent : allIntents) {
            if (intent.getComponent().getClassName().equals("com.android.documentsui.DocumentsActivity")) {
                mainIntent = intent;
                break;
            }
        }
        allIntents.remove(mainIntent);

        // Create a chooser from the main intent
        Intent chooserIntent = Intent.createChooser(mainIntent, "Select source");

        // Add all other intents
        chooserIntent.putExtra(Intent.EXTRA_INITIAL_INTENTS, allIntents.toArray(new Parcelable[allIntents.size()]));

        return chooserIntent;
    }

    /**
     * Get URI to image received from capture by camera.
     */
    private Uri getCaptureImageOutputUri() {
        Uri outputFileUri = null;
        File getImage = getExternalCacheDir();
        if (getImage != null) {
            outputFileUri = Uri.fromFile(new File(getImage.getPath(), "pickImageResult.jpeg"));
        }
        return outputFileUri;
    }

    /**
     * Get the URI of the selected image from {@link #getPickImageChooserIntent()}.<br/>
     * Will return the correct URI for camera and gallery image.
     *
     * @param data the returned data of the activity result
     */
    public Uri getPickImageResultUri(Intent data) {
        boolean isCamera = true;
        if (data != null && data.getData() != null) {
            String action = data.getAction();
            isCamera = action != null && action.equals(MediaStore.ACTION_IMAGE_CAPTURE);
        }
        return isCamera ? getCaptureImageOutputUri() : data.getData();
    }

    /**
     * Test if we can open the given Android URI; a FileNotFoundException
     * caused by an ErrnoException indicates a permission is required.
     */
    public boolean isUriRequiresPermissions(Uri uri) {
        // try-with-resources also handles a null stream from the resolver.
        try (InputStream stream = getContentResolver().openInputStream(uri)) {
            return false;
        } catch (FileNotFoundException e) {
            if (e.getCause() instanceof ErrnoException) {
                return true;
            }
        } catch (Exception ignored) {
            // Any other failure is treated as "no extra permission needed",
            // matching the previous best-effort behavior.
        }
        return false;
    }
}

나는 lang = "eng+jpn"처럼 바꾸고, "eng.traineddata"와 "jpn.traineddata" 두 traineddata 파일을 이미 assets에 추가했지만, 출력이 jpn 언어가 아닙니다(아직도 eng입니다).

그런 다음 lang = "eng"을 lang = "jpn"으로, 한 언어로만 변경했지만 작동하지 않습니다. 아직도 eng입니다.

어떻게해야합니까? 나는 그들을 다루는 방법을 모른다. T^T 미리 감사드립니다.

Simple OCR Android App Using Tesseract과 tess-two 사이의 차이점을 알고 싶습니다. 그것은 같은 일을하지만 왜 그것들을 사용하는 코드는 동일하지 않습니다

그리고 나는 같은 일을 할 수있는 leptonica 대 opencv 사이의 차이점을 알고 싶습니다. 왜 대부분의 OCR이 leptonica를 개발할 것인가?

답변

0

먼저 https://github.com/tesseract-ocr/tessdata 에서 이미 훈련된 데이터 파일(traineddata)을 다운로드한 다음, 해당 파일을 tessdata 폴더로 복사하고 사용하려는 언어를 설정해야 합니다.

+0

을 사용하려는 언어를 설정해야합니다 귀하의 폴더로 가져올 필요 그것은 이미 그것을 영어로 번역했습니다. 왜 그런지 몰라? – wanipook