python search text in Excel workbook

Created this script to search for text in a whole Excel workbook, including all worksheets!

import sys
import openpyxl

# Load the workbook named on the command line and list its sheet names.
wb = openpyxl.load_workbook(sys.argv[1])
# wb.sheetnames replaces get_sheet_names(), which was deprecated and then
# removed in openpyxl 3.x.
worksheets = wb.sheetnames
print(worksheets)
def wordfinder(ws, searchString):
    """Print every cell of worksheet *ws* whose text contains *searchString*.

    Blank cells are skipped: the original code ran str() over the cell value,
    turning empty cells into the string "None", so searching for "None" (or
    any substring of it, e.g. "on") falsely matched every empty cell.
    """
    for i in range(1, ws.max_row + 1):
        for j in range(1, ws.max_column + 1):
            value = ws.cell(i, j).value
            if value is None:
                continue  # skip blank cells instead of matching str(None)
            if searchString in str(value):
                print("found")
                print(ws.cell(i, j))
                
# Search every worksheet for the string given as the second CLI argument.
for sheetname in worksheets:
    # wb[sheetname] replaces get_sheet_by_name(), which was deprecated and
    # then removed in openpyxl 3.x.
    ws = wb[sheetname]
    wordfinder(ws, sys.argv[2])
C:\>python search_sheets.py hello.xlsx asdfa
['Impacted servers', 'Sheet1']
found
<Cell 'Sheet1'.A1>

python pip full command list

  1. list or check packages
$ pip list
$ pip search pkg   # note: "pip search" has been disabled on PyPI; browse pypi.org instead
$ pip list --outdated
$ pip show pkg

2. download package

$ pip download --destination-directory /local/wheels -r requirements.txt
$ pip install --no-index --find-links=/local/wheels -r requirements.txt

create wheel file from downloaded package:
$ pip install wheel
$ pip wheel --wheel-dir=/local/wheels -r requirements.txt

3. install packages

install from local:
$ pip install --no-index --find-links=/local/wheels pkg

install with version:
$ pip install 'pkg>=2.1.2'   # quote it, or the shell treats ">" as output redirection
$ pip install pkg==2.1.2

export installed pkgs and install it:
pip freeze >requirements.txt
pip install -r requirements.txt

4. install with proxy server

$ pip install --proxy [user:passwd@]http_server_ip:port pkg
or you can configure it in $HOME/.config/pip/pip.conf

# Linux/Unix:
/etc/pip.conf
~/.pip/pip.conf
~/.config/pip/pip.conf

# Mac OSX:
~/Library/Application Support/pip/pip.conf
~/.pip/pip.conf
/Library/Application Support/pip/pip.conf

# Windows:
%APPDATA%\pip\pip.ini
%HOME%\pip\pip.ini
C:\Documents and Settings\All Users\Application Data\PyPA\pip\pip.conf (Windows XP)
C:\ProgramData\PyPA\pip\pip.conf

Here is a sample pip.conf:
[global]
index-url = http://mirrors.aliyun.com/pypi/simple/ 

# change to your proxy[user:passwd@]proxy.server:port
proxy=http://xxx.xxx.xxx.xxx:8080 

[install]
trusted-host=mirrors.aliyun.com

5. upgrade and uninstall

$ pip install --upgrade pkg
$ pip install --upgrade pkg1 --upgrade-strategy only-if-needed
$ pip uninstall pkg

create a flask application to monitor web url status

For each web environment we usually want to monitor in real time whether our web links are down or not; here comes the solution:
1. build the url list
2. check the web link status
3. display the status on dashboard

STEP 1. Build the url list
We create load_urls.py to import the list from a CSV file into a SQLite DB.
Run python3 load_urls.py to import the list.

import sqlite3
import pandas as pd

# Load the CSV into a DataFrame. read_csv already returns a DataFrame, so
# the original pd.DataFrame(data) re-wrap was redundant.
df = pd.read_csv('list.csv', engine='python')
print(df)

conn = sqlite3.connect('db.sqlite')
try:
    c = conn.cursor()

    # Start from an empty table so the CSV is the single source of truth.
    c.execute('DELETE FROM User;')
    print('We have deleted', c.rowcount, 'records from the table.')

    # Bulk-insert all rows with one parameterized executemany call instead
    # of one execute per row.
    c.executemany(
        "INSERT INTO User (app_name, app_url, app_status) VALUES (?,?,?)",
        df[['app_name', 'app_url', 'app_status']].itertuples(index=False),
    )
    conn.commit()
finally:
    # Always release the connection, even if the import fails part-way.
    conn.close()

STEP 2. check the url status with curl command
set up a cron job as below to run the status check every 5 minutes:
*/5 * * * * /root/monitor/daily_check.sh >/dev/null 2>&1

import subprocess
from bootstrap_table import db, User
import pandas as pd

def api(cmd):
        """Execute *cmd* through the shell and return its captured stdout.

        stderr is captured as well but discarded, matching the original
        Popen/communicate behavior.
        """
        completed = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, universal_newlines=True)
        return completed.stdout

def remote_bash(app_url):
        """Build the shell command that fetches only the first HTTP header
        line (the status line) of *app_url*."""
        pieces = ("curl -kIs ", app_url, " --connect-timeout 5 | head -1")
        return "".join(pieces)

def status_check(app_output):
        """Classify a curl status line: any "200" substring means UP,
        anything else (including empty output) means DOWN."""
        return "UP" if "200" in app_output else "DOWN"

# Compare each stored app status against a live curl check; on a status
# transition, post an alert to the Teams channel, then persist the new state.
for row in User.query.all():
        curl_out = api(remote_bash(row.app_url))
        # Compute the status once instead of re-deriving it three times.
        status = status_check(curl_out)
        if status != row.app_status:
                # Send an alert card to the TEAMS incoming webhook.
                # NOTE(review): app_name/app_url are interpolated into a shell
                # command; acceptable for trusted config rows, unsafe for
                # untrusted input.
                JSON = ("curl -H 'Content-Type: application/json' "
                        "-d '{\"text\": \"%s %s is %s\"}' "
                        "https://labcorp.webhook.office.com/webhookb2/f3c6c02d-d1a-46cb-b304-84f4460b98b0@cdc1229-ac2a-4b97-b78a-0e5cacb5865c/IncomingWebhook/57b24246ed804247a2f60841256c6d5e/d8062d41-34c8-475d-81b2-04465b429007"
                        % (row.app_name, row.app_url, status))
                api(JSON)
        row.app_status = status
db.session.commit()

STEP 3. display the status on dashboard (bootstrap_table.py)

from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy

# Flask application backed by a local SQLite database via SQLAlchemy.
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_DATABASE_URI='sqlite:///db.sqlite',
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
)
db = SQLAlchemy(app)

class User(db.Model):
    """One monitored application: display name, URL, and last known status."""
    id = db.Column(db.Integer, primary_key=True)
    # Application display name shown on the dashboard.
    app_name = db.Column(db.String(100), index=True)
    # URL that the cron-driven curl check probes.
    app_url = db.Column(db.String(200), index=True)
    # "UP" or "DOWN", maintained by the monitor loop.
    app_status = db.Column(db.String(20), index=True)

# Create the table at import time. Flask-SQLAlchemy 3.x requires an
# application context for create_all(); the context is harmless on 2.x.
with app.app_context():
    db.create_all()
@app.route('/')
def index():
    """Render the status dashboard with one row per monitored application."""
    return render_template(
        'bootstrap_table.html',
        title='Application Status Monitor',
        users=User.query,
    )

if __name__ == '__main__':
    # Bind to all interfaces so the dashboard is reachable from other hosts.
    app.run(host='0.0.0.0')

bootstrap_table.html

{# Status dashboard table, rendered by the index view with users=User.query. #}
{% extends "base.html" %}

{% block content %}
  <table id="data" class="table table-striped">
    <thead>
      <tr>
        <th>app_name</th>
        <th>app_url</th>
        <th>app_status</th>

      </tr>
    </thead>
    <tbody>
      {% for user in users %}
        <tr>
          <td>{{ user.app_name }}</td>
          <td>{{ user.app_url }}</td>

            {# Green for "UP", red for any other status (e.g. "DOWN"). #}
            {% if user.app_status == "UP" %}
                <td style="color:#00FF00"><strong>{{ user.app_status }}</strong></td>
            {% else %}
                <td style="color:#FF0000"><strong>{{ user.app_status }}</strong></td>
            {% endif %}

        </tr>
      {% endfor %}
    </tbody>
  </table>
{% endblock %}

and base.html

{# Base layout: Bootstrap 5 + DataTables CSS in <head>, shared container body,
   and jQuery/DataTables scripts at the end of <body>.
   NOTE(review): the DataTables assets are loaded but no page here calls
   $('#data').DataTable() — child templates would need to add that in the
   "scripts" block for sorting/search to activate; confirm intended. #}
<!doctype html>
<html>
  <head>
    <title>{{ title }}</title>
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
    <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.25/css/dataTables.bootstrap5.css">
  </head>
  <body>
    <div class="container">
      <h1>{{ title }}</h1>
      <hr>
      {% block content %}{% endblock %}
    </div>
    <script type="text/javascript" charset="utf8" src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
    <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.25/js/jquery.dataTables.js"></script>
    <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.25/js/dataTables.bootstrap5.js"></script>
    {% block scripts %}{% endblock %}
  </body>
</html>

Deploy Flask application From Git on OpenShift

Step 1. create Flask app on https://github.com/zhuby1973/code.git
Step 2. create an application From Git on OpenShift console

Step 3. input repository link, Context Dir and select python

Step 4. update service port from 8080 to 5000 once build

Step 5. verify the Flask application after restart

Step 6. you can edit the code in Git, then redeploy to get the application updated!

Deploy Flask application From Dockerfile on OpenShift

Step 1. Create Dockerfile on https://github.com/IBM/deploy-python-openshift-tutorial.git

# Minimal Alpine image that runs the Flask helloworld app on port 5000.
# Instruction keywords are upper-cased per Dockerfile convention; the
# lowercase "from"/"expose" in the original work but trigger build warnings.
FROM alpine:latest
RUN apk add --no-cache py3-pip \
    && pip3 install --upgrade pip
WORKDIR /app
COPY . /app
RUN pip3 --no-cache-dir install -r requirements.txt
EXPOSE 5000
ENTRYPOINT ["python3"]
CMD ["helloworld.py"]

Step 2. create helloworld.py in same repository

from flask import Flask  # Flask class supplies the WSGI application object

# One application instance for this tiny demo web app.
app = Flask(__name__)

@app.route('/')  # register main() as the handler for the site root
def main():
    """Say hello"""
    return 'Hello, world!'

if __name__ == '__main__':  # start the dev server only when run as a script
    print("Hello World! Built with a Docker file.")
    # 0.0.0.0 so the containerized server accepts connections from outside;
    # debug + reloader restart the app when source files change.
    app.run(host="0.0.0.0", port=5000, debug=True, use_reloader=True)

Step 3. Create a demo project on OpenShift console and create an application with “From Dockerfile”

Step 4. input git url https://github.com/IBM/deploy-python-openshift-tutorial.git and port number 5000

Step 5. You will find new application once build completed

Step 6. open the url to verify the Flask web application

Work on OpenShift with CLI

Step 1. after download oc.exe from OpenShift website, you can get the login token from web console => Copy Login Command

Step 2. oc login and work on your project

C:\Hans>oc login --token=sha256~BOkusrH7Npa5N9OWp6WomU98jox8UwjPm --server=https://c109-e.us-east.containers.cloud.ibm.com:30955
Logged into "https://c109-e.us-east.containers.cloud.ibm.com:30955" as "IAM#rh-dev-1093" using the token provided.

You have access to 63 projects, the list has been suppressed. You can list all projects with ' projects'

Using project "default".
Welcome! See 'oc help' to get started.

C:\Hans>oc project workshop
Now using project "workshop" on server "https://c109-e.us-east.containers.cloud.ibm.com:30955".

C:\Hans>oc get pods
NAME                        READY   STATUS    RESTARTS   AGE
parksmap-56fcbb7db4-gqfdc   1/1     Running   0          2m25s
C:\Hans>oc get pod parksmap-56fcbb7db4-gqfdc -o yaml
apiVersion: v1
kind: Pod
metadata:
  annotations:
    cni.projectcalico.org/podIP: 172.30.218.69/32
    cni.projectcalico.org/podIPs: 172.30.218.69/32
......
C:\Hans>oc get deployment
NAME       READY   UP-TO-DATE   AVAILABLE   AGE
parksmap   1/1     1            1           6m34s

C:\Hans>oc get rs
NAME                  DESIRED   CURRENT   READY   AGE
parksmap-56fcbb7db4   1         1         1       6m43s

C:\Hans>oc scale --replicas=2 deployment/parksmap
deployment.apps/parksmap scaled

C:\Hans>oc get endpoints parksmap
NAME       ENDPOINTS                                AGE
parksmap   172.30.134.235:8080,172.30.218.69:8080   8m27s

C:\Hans>oc delete pod parksmap-56fcbb7db4-gqfdc
pod "parksmap-56fcbb7db4-gqfdc" deleted

C:\Hans>oc rollout restart deployment/parksmap
deployment.apps/parksmap restarted

C:\Hans>oc create -f https://raw.githubusercontent.com/openshift-labs/starter-guides/ocp-4.6/mongodb-template.yaml -n workshop
template.template.openshift.io/mongodb-ephemeral created

C:\Hans>oc rsh parksmap-78c58f896-mjqnw
sh-4.2$ ls /
anaconda-post.log  dev  home  lib64  mnt  parksmap.jar  root  sbin  sys  usr
bin                etc  lib   media  opt  proc          run   srv   tmp  var

C:\Hans>oc get route
NAME       HOST/PORT                                                                                                            PATH   SERVICES   PORT       TERMINATION   WILDCARD
parksmap   parksmap-workshop.rhd-wdc07-may19-3875203-4c50a18a6ae19b704aa10d04d75751f8-0000.us-east.containers.appdomain.cloud          parksmap   8080-tcp   edge          None
https://parksmap-workshop.rhd-wdc07-may19-3875203-4c50a18a6ae19b704aa10d04d75751f8-0000.us-east.containers.appdomain.cloud/
C:\Hans>oc create -f https://raw.githubusercontent.com/openshift-roadshow/mlbparks/master/ose3/application-template-eap.json -n workshop
template.template.openshift.io/mlbparks created

C:\Hans>oc get template
NAME                DESCRIPTION                                                                        PARAMETERS        OBJECTS
mlbparks            Application template MLBParks backend running on Wildfly and using mongodb         12 (2 blank)      8
mongodb-ephemeral   MongoDB database service, without persistent storage. For more information ab...   8 (3 generated)   3

C:\Hans>oc new-app mlbparks -p APPLICATION_NAME=mlbparks
--> Deploying template "workshop/mlbparks" to project workshop

     MLBparks
     ---------
     Application template MLBParks backend running on Wildfly and using mongodb

     * With parameters:
        * Application Name=mlbparks
        * Application route=
        * Mongodb App=mongodb-mlbparks
        * Git source repository=https://github.com/openshift-roadshow/mlbparks.git
        * Git branch/tag reference=master
        * Maven mirror url=
        * Database name=mongodb
        * Database user name=usergyY # generated
        * Database user password=PXKOdWS8 # generated
        * Database admin password=0BbQcb7j # generated
        * GitHub Trigger=2sfrBrUY # generated
        * Generic Trigger=whNYmASR # generated

--> Creating resources ...
    configmap "mlbparks" created
    service "mongodb-mlbparks" created
    deploymentconfig.apps.openshift.io "mongodb-mlbparks" created
    imagestream.image.openshift.io "mlbparks" created
    buildconfig.build.openshift.io "mlbparks" created
    deploymentconfig.apps.openshift.io "mlbparks" created
    service "mlbparks" created
    route.route.openshift.io "mlbparks" created
--> Success
    Build scheduled, use 'oc logs -f buildconfig/mlbparks' to track its progress.
    Access your application via route 'mlbparks-workshop.rhd-wdc07-may19-3875203-4c50a18a6ae19b704aa10d04d75751f8-0000.us-east.containers.appdomain.cloud'
    Run 'oc status' to view your app.

CLI commands to create route:
 $ oc create route reencrypt --service=docker-registry
 $ oc get route docker-registry

ref: https://redhat-scholars.github.io/openshift-starter-guides/rhs-openshift-starter-guides/4.6/index.html

Deploy Django application on OpenShift

OpenShift is a family of containerization software products developed by Red Hat. Its flagship product is the OpenShift Container Platform — an on-premises platform as a service built around Docker containers orchestrated and managed by Kubernetes on a foundation of Red Hat Enterprise Linux.

Step 1. Request a Developer Sandbox from Red Hat OpenShift website

Step 2. Create an application from a code sample (choose Python)

Step 3. It will deploy the python-sample app on OpenShift
please click “Open URL” once the build has completed

Step 4. Scale up with more pods

Step 5. Edit source code Button to open IDE

ref: Create an OpenShift cluster on Azure
(Azure OpenShift requires a minimum of 40 cores to create and run an OpenShift cluster.)
https://docs.microsoft.com/en-us/azure/openshift/tutorial-create-cluster
https://docs.microsoft.com/en-us/azure/openshift/tutorial-connect-cluster

Extract Data with Azure Form Recognizer

Step 1. clone the repository
https://github.com/MicrosoftLearning/AI-102-AIEngineer/21-custom-form

Step 2. create a Form Recognizer resource in Azure Portal

Step 3. setup python env
edit C:\Hans\AI-102-AIEngineer\21-custom-form\setup.cmd with your values:

rem Set variable values 
set subscription_id=YOUR_SUBSCRIPTION_ID
set resource_group=YOUR_RESOURCE_GROUP
set location=YOUR_LOCATION_NAME

Then run the command to create a SAS URI:
(base) C:\Users\Student\miniconda3\AI-102-AIEngineer\21-custom-form>az login
(base) C:\Users\Student\miniconda3\AI-102-AIEngineer\21-custom-form>setup.cmd
Creating storage...
Uploading files...
Finished[#############################################################]  100.0000%
-------------------------------------
SAS URI: https://ai102form7685119.blob.core.windows.net/sampleforms?se=2022-01-01T00%3A00%3A00Z&sp=rwl&sv=2018-11-09&sr=c&sig=Wopn1A5klioFouoyYKV57hrFIO7SbkGJmjZV%2BIe7R6I%3D

Step 4. Train a model
pip install azure-ai-formrecognizer==3.0.0
edit train-model.py with your endpoint, key and SAS URI:

import os 

from azure.core.exceptions import ResourceNotFoundError
from azure.ai.formrecognizer import FormRecognizerClient
from azure.ai.formrecognizer import FormTrainingClient
from azure.core.credentials import AzureKeyCredential

def main():
    """Train a custom Form Recognizer model from sample forms in blob storage.

    Prints the resulting model ID (needed by test-model.py) plus status and
    training timestamps. Any failure is caught and printed rather than raised.
    """
    try:

        # Get configuration settings
        # NOTE(review): endpoint, key, and SAS URI are hard-coded; they should
        # be read from environment variables — this key is now public and
        # should be rotated.
        ENDPOINT='https://hansformrecognizer.cognitiveservices.azure.com/'
        KEY='f20ca70a5497484c9f239d3431df2757'
        trainingDataUrl = 'https://ai102form2397530048.blob.core.windows.net/sampleforms?se=2022-01-01T00%3A00%3A00Z&sp=rwl&sv=2018-11-09&sr=c&sig=3LQtq9KfelRXPSf6aqVN/Z3UcIN7KE1Net76W6alTGg%3D'

        # Authenticate Form Training Client
        form_recognizer_client = FormRecognizerClient(ENDPOINT, AzureKeyCredential(KEY))
        form_training_client = FormTrainingClient(ENDPOINT, AzureKeyCredential(KEY))

        # Train model (unlabeled training; poller blocks until completion)
        poller = form_training_client.begin_training(trainingDataUrl, use_training_labels=False)
        model = poller.result()

        print("Model ID: {}".format(model.model_id))
        print("Status: {}".format(model.status))
        print("Training started on: {}".format(model.training_started_on))
        print("Training completed on: {}".format(model.training_completed_on))

    except Exception as ex:
        # Best-effort script: report the error and exit normally.
        print(ex)

if __name__ == '__main__':
    main()
PS C:\Hans\AI-102-AIEngineer\21-custom-form\Python\train-model> python .\train-model.py
Model ID: 37951e13-645e-4364-a93e-96bb1bccdb78
Status: ready
Training started on: 2021-05-06 15:48:40+00:00
Training completed on: 2021-05-06 15:48:51+00:00

Step 5. Test the model
edit test-model.py with your Model ID generated in previous step:

import os 

from azure.core.exceptions import ResourceNotFoundError
from azure.ai.formrecognizer import FormRecognizerClient
from azure.ai.formrecognizer import FormTrainingClient
from azure.core.credentials import AzureKeyCredential

def main():
    """Run a previously trained Form Recognizer model against test1.jpg.

    Prints each recognized form's type and every field's label, value, and
    confidence. Any failure is caught and printed rather than raised.
    """
    try:

        # Get configuration settings
        # NOTE(review): endpoint and key are hard-coded; prefer environment
        # variables — this key is now public and should be rotated.
        ENDPOINT='https://hansformrecognizer.cognitiveservices.azure.com/'
        KEY='f20ca70a5497484c9f239d3431df2757'

        # Create client using endpoint and key
        form_recognizer_client = FormRecognizerClient(ENDPOINT, AzureKeyCredential(KEY))
        form_training_client = FormTrainingClient(ENDPOINT, AzureKeyCredential(KEY))

        # Model ID from when you trained your model (train-model.py output).
        model_id = '37951e13-645e-4364-a93e-96bb1bccdb78'

        # Test trained model with a new form
        # NOTE(review): poller.result() is called after the with-block closes
        # the file — presumably the SDK reads the stream when the call is
        # made, but confirm; moving result() inside the with-block is safer.
        with open('test1.jpg', "rb") as f:
            poller = form_recognizer_client.begin_recognize_custom_forms(
                model_id=model_id, form=f)

        result = poller.result()

        for recognized_form in result:
            print("Form type: {}".format(recognized_form.form_type))
            for name, field in recognized_form.fields.items():
                print("Field '{}' has label '{}' with value '{}' and a confidence score of {}".format(
                    name,
                    field.label_data.text if field.label_data else name,
                    field.value,
                    field.confidence
                ))

    except Exception as ex:
        # Best-effort script: report the error and exit normally.
        print(ex)

if __name__ == '__main__':
    main()

verify the app:

C:\Hans\AI-102-AIEngineer\21-custom-form\Python\test-model> python .\test-model.py
Form type: form-0
Field 'field-0' has label 'Hero Limited' with value 'accounts@herolimited.com' and a confidence score of 0.53
Field 'field-1' has label 'Company Phone:' with value '555-348-6512' and a confidence score of 1.0
Field 'field-2' has label 'Website:' with value 'www.herolimited.com' and a confidence score of 1.0
Field 'field-3' has label 'Email:' with value '49823 Major Ave Cheer, MS, 38601' and a confidence score of 0.53
Field 'field-4' has label 'Dated As:' with value '04/04/2020' and a confidence score of 1.0
Field 'field-5' has label 'Purchase Order #:' with value '3929423' and a confidence score of 1.0
Field 'field-6' has label 'Vendor Name:' with value 'Seth Stanley' and a confidence score of 0.53
Field 'field-7' has label 'Company Name:' with value 'Yoga for You' and a confidence score of 1.0
Field 'field-8' has label 'Address:' with value '343 E Winter Road' and a confidence score of 1.0
Field 'field-9' has label 'Seattle, WA 93849 Phone:' with value '234-986-6454' and a confidence score of 0.53
Field 'field-10' has label 'Name:' with value 'Josh Granger' and a confidence score of 0.86
Field 'field-11' has label 'Company Name:' with value 'Granger Supply' and a confidence score of 0.53
Field 'field-12' has label 'Address:' with value '922 N Ebby Lane' and a confidence score of 0.53
Field 'field-13' has label 'Phone:' with value '932-294-2958' and a confidence score of 1.0
Field 'field-14' has label 'SUBTOTAL' with value '$6750.00' and a confidence score of 1.0
Field 'field-15' has label 'TAX' with value '$600.00' and a confidence score of 1.0
Field 'field-16' has label 'TOTAL' with value '$7350.00' and a confidence score of 1.0
Field 'field-17' has label 'Additional Notes:' with value 'Enjoy. Namaste. If you have any issues with your Yoga supplies please contact us directly via email or at 250-209-1294 during business hours.' and a confidence score 
of 0.53

OCR and text read with Azure Computer Vision

Step 1. clone the repository
https://github.com/MicrosoftLearning/AI-102-AIEngineer/20-ocr

Step 2. edit read-text.py as below:

import os
import time
from PIL import Image, ImageDraw
from matplotlib import pyplot as plt

# import namespaces
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from msrest.authentication import CognitiveServicesCredentials

def main():
    """Menu-driven demo: read printed or handwritten text from sample images.

    Authenticates a Computer Vision client (stored in the module-level
    cv_client global used by GetTextOcr/GetTextRead), then dispatches on a
    one-character menu choice. Errors are caught and printed, not raised.
    """
    global cv_client
    try:
        # Get Configuration Settings
        # NOTE(review): key and endpoint are hard-coded; prefer environment
        # variables — this key is now public and should be rotated.
        KEY='c36f33fd15784c3984c5a88dcde4c31c'
        ENDPOINT='https://ai102cg.cognitiveservices.azure.com/'

        # Authenticate Computer Vision client
        credential = CognitiveServicesCredentials(KEY) 
        cv_client = ComputerVisionClient(ENDPOINT, credential)
     
        # Menu for text reading functions; any other input exits silently.
        print('1: Use OCR API\n2: Use Read API\n3: Read handwriting\nAny other key to quit')
        command = input('Enter a number:')
        if command == '1':
            image_file = os.path.join('images','Lincoln.jpg')
            GetTextOcr(image_file)
        elif command =='2':
            image_file = os.path.join('images','Rome.pdf')
            GetTextRead(image_file)
        elif command =='3':
            # Handwriting also goes through the Read API.
            image_file = os.path.join('images','Note.jpg')
            GetTextRead(image_file)

    except Exception as ex:
        # Best-effort script: report the error and exit normally.
        print(ex)

def GetTextOcr(image_file):
    """OCR printed text in *image_file* via the legacy OCR API.

    Prints each recognized line and saves a copy of the image with magenta
    boxes around the text regions to ocr_results.jpg. Uses the module-level
    cv_client created by main().
    """
    print('Reading text in {}\n'.format(image_file))
    # Use OCR API to read text in image
    with open(image_file, mode="rb") as image_data:
        ocr_results = cv_client.recognize_printed_text_in_stream(image_data)

    # Prepare image for drawing
    fig = plt.figure(figsize=(7, 7))
    img = Image.open(image_file)
    draw = ImageDraw.Draw(img)

    # Process the text line by line
    for region in ocr_results.regions:
        for line in region.lines:

            # Show the position of the line of text; the bounding box comes
            # back as a "left,top,width,height" comma-separated string.
            l,t,w,h = list(map(int, line.bounding_box.split(',')))
            draw.rectangle(((l,t), (l+w, t+h)), outline='magenta', width=5)

            # Read the words in the line of text
            line_text = ''
            for word in line.words:
                line_text += word.text + ' '
            print(line_text.rstrip())

    # Save the image with the text locations highlighted
    plt.axis('off')
    plt.imshow(img)
    outputfile = 'ocr_results.jpg'
    fig.savefig(outputfile)
    print('Results saved in', outputfile)

def GetTextRead(image_file):
    """Read printed or handwritten text from *image_file* via the async Read API.

    Submits the image, polls once per second until the operation leaves the
    running/not_started states, then prints every recognized line. Uses the
    module-level cv_client created by main().
    """
    print('Reading text in {}\n'.format(image_file))
    # Use Read API to read text in image
    with open(image_file, mode="rb") as image_data:
        read_op = cv_client.read_in_stream(image_data, raw=True)

        # Get the async operation ID so we can check for the results
        operation_location = read_op.headers["Operation-Location"]
        operation_id = operation_location.split("/")[-1]

        # Wait for the asynchronous operation to complete
        while True:
            read_results = cv_client.get_read_result(operation_id)
            if read_results.status not in [OperationStatusCodes.running, OperationStatusCodes.not_started]:
                break
            time.sleep(1)

        # If the operation was successful, process the text line by line
        if read_results.status == OperationStatusCodes.succeeded:
            for page in read_results.analyze_result.read_results:
                for line in page.lines:
                    print(line.text)

if __name__ == "__main__":
    main()

Step 3. verify the app

C:\Hans\AI-102-AIEngineer\20-ocr\Python\read-text> python .\read-text.py
1: Use OCR API
2: Use Read API
3: Read handwriting
Any other key to quit
Enter a number:3
Reading text in images\Note.jpg

Shopping List
Non-Fat milk
Bread
Eggs
Note.jpg

Analyze Images with Azure Computer Vision

Step 1. install lib:
pip install azure-cognitiveservices-vision-computervision==0.7.0

Step 2. create image-analysis.py and prepare a picture images/street.jpg

from dotenv import load_dotenv
import os
from array import array
from PIL import Image, ImageDraw
import sys
import time
from matplotlib import pyplot as plt
import numpy as np

# Import namespaces
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from msrest.authentication import CognitiveServicesCredentials

def main():
    """Analyze an image (default images/street.jpg, or argv[1]) and save a thumbnail.

    Authenticates a Computer Vision client into the module-level cv_client
    global used by AnalyzeImage/GetThumbnail. Errors are caught and printed,
    not raised.
    """
    global cv_client

    try:
        # Get Configuration Settings
        # NOTE(review): key and endpoint are hard-coded; prefer environment
        # variables — this key is now public and should be rotated.
        KEY='c36f33fd15784c3984c5a88dcde4c31c'
        ENDPOINT='https://ai102cg.cognitiveservices.azure.com/'
        # Get image path; a command-line argument overrides the default.
        image_file = 'images/street.jpg'
        if len(sys.argv) > 1:
            image_file = sys.argv[1]

        # Authenticate Computer Vision client
        credential = CognitiveServicesCredentials(KEY) 
        cv_client = ComputerVisionClient(ENDPOINT, credential)

        # Analyze image
        AnalyzeImage(image_file)

        # Generate thumbnail
        GetThumbnail(image_file)

    except Exception as ex:
        # Best-effort script: report the error and exit normally.
        print(ex)

def AnalyzeImage(image_file):
    """Run full image analysis on *image_file* and print the findings.

    Requests description, tags, categories, brands, objects, and adult
    ratings; prints each with its confidence, draws bounding boxes around
    detected objects into objects.jpg. Uses the module-level cv_client
    created by main().
    """
    print('Analyzing', image_file)

    # Specify features to be retrieved
    features = [VisualFeatureTypes.description,
                VisualFeatureTypes.tags,
                VisualFeatureTypes.categories,
                VisualFeatureTypes.brands,
                VisualFeatureTypes.objects,
                VisualFeatureTypes.adult]
    
    # Get image analysis
    with open(image_file, mode="rb") as image_data:
        analysis = cv_client.analyze_image_in_stream(image_data , features)

    # Get image description
    for caption in analysis.description.captions:
        print("Description: '{}' (confidence: {:.2f}%)".format(caption.text, caption.confidence * 100))

    # Get image tags
    if (len(analysis.tags) > 0):
        print("Tags: ")
        for tag in analysis.tags:
            print(" -'{}' (confidence: {:.2f}%)".format(tag.name, tag.confidence * 100))

    # Get image categories (including celebrities and landmarks)
    if (len(analysis.categories) > 0):
        print("Categories:")
        landmarks = []
        celebrities = []
        for category in analysis.categories:
            # Print the category
            print(" -'{}' (confidence: {:.2f}%)".format(category.name, category.score * 100))
            if category.detail:
                # Get landmarks in this category (deduplicated across categories)
                if category.detail.landmarks:
                    for landmark in category.detail.landmarks:
                        if landmark not in landmarks:
                            landmarks.append(landmark)

                # Get celebrities in this category (deduplicated across categories)
                if category.detail.celebrities:
                    for celebrity in category.detail.celebrities:
                        if celebrity not in celebrities:
                            celebrities.append(celebrity)

        # If there were landmarks, list them
        if len(landmarks) > 0:
            print("Landmarks:")
            for landmark in landmarks:
                print(" -'{}' (confidence: {:.2f}%)".format(landmark.name, landmark.confidence * 100))

        # If there were celebrities, list them
        if len(celebrities) > 0:
            print("Celebrities:")
            for celebrity in celebrities:
                print(" -'{}' (confidence: {:.2f}%)".format(celebrity.name, celebrity.confidence * 100))

    # Get brands in the image

    if (len(analysis.brands) > 0):
        print("Brands: ")
        for brand in analysis.brands:
            print(" -'{}' (confidence: {:.2f}%)".format(brand.name, brand.confidence * 100))

    # Get objects in the image
    if len(analysis.objects) > 0:
        print("Objects in image:")

        # Prepare image for drawing
        fig = plt.figure(figsize=(8, 8))
        plt.axis('off')
        image = Image.open(image_file)
        draw = ImageDraw.Draw(image)
        color = 'cyan'
        for detected_object in analysis.objects:
            # Print object name
            print(" -{} (confidence: {:.2f}%)".format(detected_object.object_property, detected_object.confidence * 100))

            # Draw object bounding box and label it at the box's top-left corner
            r = detected_object.rectangle
            bounding_box = ((r.x, r.y), (r.x + r.w, r.y + r.h))
            draw.rectangle(bounding_box, outline=color, width=3)
            plt.annotate(detected_object.object_property,(r.x, r.y), backgroundcolor=color)
        # Save annotated image
        plt.imshow(image)
        outputfile = 'objects.jpg'
        fig.savefig(outputfile)
        print('  Results saved in', outputfile)

    # Get moderation ratings
    ratings = 'Ratings:\n -Adult: {}\n -Racy: {}\n -Gore: {}'.format(analysis.adult.is_adult_content,
                                                                        analysis.adult.is_racy_content,
                                                                        analysis.adult.is_gory_content)
    print(ratings)        

def GetThumbnail(image_file):
    """Generate a 100x100 thumbnail of *image_file* and save it as thumbnail.png.

    Uses the module-level cv_client created by main().
    """
    print('Generating thumbnail')

    # Generate a thumbnail
    with open(image_file, mode="rb") as image_data:
        # Get thumbnail data; the trailing True is presumably the SDK's
        # smart_cropping flag — TODO confirm against the SDK signature.
        thumbnail_stream = cv_client.generate_thumbnail_in_stream(100, 100, image_data, True)

    # Save thumbnail image, streamed chunk by chunk
    thumbnail_file_name = 'thumbnail.png'
    with open(thumbnail_file_name, "wb") as thumbnail_file:
        for chunk in thumbnail_stream:
            thumbnail_file.write(chunk)

    print('Thumbnail saved in.', thumbnail_file_name)

if __name__ == "__main__":
    main()
images/street.jpg

Step 3. run it with:
python image-analysis.py images/street.jpg
you will get objects.jpg and thumbnail.png generated in same folder:

objects.jpg
thumbnail.png

ref: https://docs.microsoft.com/en-us/azure/cognitive-services/Face/Quickstarts/client-libraries?pivots=programming-language-python&tabs=visual-studio