Skip to main content

JIRA AUTO CLOSE

JRPY

import pandas

import subprocess

import time

import logging

import sys

# --- Logging initialization ---
# Writes to a fixed log file; filemode='w' truncates the previous run's log.
# BUGFIX: the original filename used a curly quote (’/data/...) which is a
# Python syntax error; replaced with a normal single quote.
logging.basicConfig(
    filename='/data/jira_closure.log',
    format='[%(asctime)s {%(pathname)s:%(lineno)d} :%(levelname)s ] \n %(message)s',
    datefmt='%m-%d-%Y %H:%M:%S',
    level=logging.INFO,
    filemode='w',
)

# Module-level logger used throughout the script.
logger = logging.getLogger(__name__)

from datetime import datetime,timedelta

import json

from jira import JIRA,JIRAError

from master_key import GETPWD

 

# Record the script start time so total run duration can be reported at the end.
statTM= datetime.now()

# Retrieve the JIRA credential via the project's master_key helper.
# NOTE(review): GETPWD's source is not visible here — presumably returns a
# plaintext password for basic auth; confirm.
pwd=GETPWD()

# Parse the optional dry-run flag from the command line.  Defaults to "Y"
# (dry run) so the bot never closes tickets unless explicitly told to.
# BUGFIX: 'dry-run' is not a valid Python identifier (it parses as a
# subtraction) — renamed to 'dry_run'.
# BUGFIX: the else branch referenced sys.argv[1], which raises IndexError
# when no argument was given; both messages now use sys.argv[0] (script name).
if len(sys.argv) > 1:
    dry_run = sys.argv[1]
    logger.info(f"DEFAULT EXECUTION of {sys.argv[0]} in DRY RUN (FLAG {dry_run})")
else:
    dry_run = "Y"
    logger.info(f"DEFAULT EXECUTION of {sys.argv[0]} in DRY RUN (FLAG {dry_run})")

 

def close_jira(jiraTKT, data):
    """Transition a JIRA ticket to 'Production' status.

    Sets the fix version, attaches *data* as a closure comment, then performs
    the workflow transition.  Tickets already in 'Production' are left alone.

    Args:
        jiraTKT: JIRA issue key (e.g. "PROJ-123").
        data:    Text (a markdown/JIRA table) appended as the closure comment.

    Returns:
        None.  Exits the process with status 1 on any JIRAError.
    """
    print(f"Going to Close JIRA {jiraTKT}")
    try:
        issue = jira.issue(jiraTKT)
        if issue.fields.status.name == 'Production':
            logger.info(f"JIRA {jiraTKT} already in Production Status - No Action to take")
        else:
            # NOTE(review): placeholder — substitute the real fix version name.
            fixVersions = [{'name': '<fix version to use>'}]
            # BUGFIX: original read fields=('fixVersions':fixVersions} — a
            # syntax error; 'fields' must be a dict.
            issue.update(fields={'fixVersions': fixVersions})
            jira.add_comment(issue, f"JIRA BOT CLOSED \n {data}")
            jira.transition_issue(issue, u'Production')
            logger.info(f"JIRA BOT CLOSED {jiraTKT}")
    except JIRAError as e:
        logger.info(f"Failed to Close JIRA {jiraTKT} with ERROR {e.status_code} MESSAGE {e.text} ")
        sys.exit(1)
    return None

   

# JIRA server base URL.
# BUGFIX: original was 'https:jira.com' — missing '//' after the scheme.
URL = 'https://jira.com'

# Hive Beeline JDBC connection URL (Kerberos principal placeholder).
hCon = "jdbc:hive2://host:10000/db;principal=hive/_HOST@FQDN"

# Instantiate the JIRA client connection using basic auth.
# NOTE(review): 'user' is hard-coded — confirm this is the intended service account.
jira = JIRA(server=URL, basic_auth=('user', pwd))

 

# Timestamp tag for this run, e.g. "20240101_120000".
toDAY = time.strftime('%Y%m%d_%H%M%S', time.localtime())

logger.info(f"STARTING SCRIPT {sys.argv[0]} - DATE OF RUN {toDAY}")

# Output directory and CSV file path for the Hive extract.
# NOTE(review): opFL appears unused — beeline writes hive_tickets.txt instead.
opLOC = '/data/jira/out'
opFL = opLOC + "/hiveOP.csv"

 

hiveQRY="""WITH JIRCUR as (SELECT JIRA_TKT_NO,split(summary,'')[1] as tbl_name from tbl1

WHERE status = 'Open' and issue_type='Bug' and summary rlike('^Table [A-Z]\[a-z]|[0-9]* Not Current'))

SELECT JIRA_TKT_NO,tbl_name,b.run_date,b.table_name,b.log_table,b.last_received_date,b.source_system,b.load_status from JIRCUR a

INNER JOIN

tbl2 b

ON (trim(a.tbl_name)=trim(b.table_name))

WHERE b.load_status = 'PASS' and b.run_date = current_date

;"""

 

# Build the beeline command; query output is redirected to hive_tickets.txt.
# NOTE(review): the query is interpolated into a shell string — safe only
# because hiveQRY is a constant defined in this file, not user input.
cmd = f'''beeline -u "{hCon}" --silent=true --outputformat=csv2 -e "{hiveQRY}" > {opLOC}/hive_tickets.txt '''

# Run the extract.  getstatusoutput returns (exit_status, combined_output).
status, output = subprocess.getstatusoutput(cmd)

if status == 0:
    logger.info(f"BEELINE COMMAND COMPLETED SUCESSFULLY {cmd} ")
else:
    logger.info(f"BEELINE COMMAND {cmd} FAILED WITH STATUS {status}")
    # BUGFIX: the original logged the failure but carried on, which would
    # then read a missing or stale hive_tickets.txt.  Abort instead.
    sys.exit(1)

  

  

# Load Hive output file to DF using pandas.
# BUGFIX: the module is imported as 'pandas' (not 'import pandas as pd'),
# so the original 'pd.read_csv' raised NameError.
df = pandas.read_csv(f"{opLOC}/hive_tickets.txt", header=0)
# Re-label columns to stable names used by the closing loop below.
df.columns = ['jira_key','jira_tbl','run_date','table_name','log_table','last_received_date','source_system','load_status']

print(len(df))

# No candidate tickets today — nothing to do, exit cleanly.
if df.empty:
    logger.info(f"NO JIRA TICKET CANDIDATES FROM HIVE TABLE FOR {toDAY}")
    sys.exit(0)

   

#ITERATE over each row in dataframe and close jira ticket
# In dry-run mode ('Y') only log what would be closed; otherwise close the
# ticket with the row rendered as a JIRA-format table.
# BUGFIX: original tested 'dry-run' which is invalid Python (parses as a
# subtraction) — uses the corrected 'dry_run' variable.
for index, row in df.iterrows():
    if dry_run == 'Y':
        logger.info(f''' READY TO CLOSE \n JIRA ISSYE {row['jira_key']} \n DATA {df.iloc[[index]].to_markdown(index=False,tablefmt="jira")}''')
    else:
        logger.info(f"NOW GOING TO CLOSE JIRA {row['jira_key']}")
        close_jira(row['jira_key'], df.iloc[[index]].to_markdown(index=False, tablefmt="jira"))

 

 

# Report total elapsed time since statTM.  Note: datetime.now() is called
# separately in each message, so logged and printed durations can differ by
# microseconds.
logger.info(f"BOT SCRIPT {sys.argv[0]} COMPLETED EXECUTION IN (HH:MM:SS.ssss) :- {datetime.now() - statTM} ")

print(f"SCRIPT {sys.argv[0]} COMPLETED EXECUTION IN (HH:MM:SS.ssss) :- {datetime.now() - statTM} ")

print("BOT SCRIPT COMPLETED ")

  

 

 

Comments

Popular posts from this blog

LookML

  What Is LookML? LookML is a language for describing dimensions, aggregates, calculations, and data relationships in a SQL database. Looker uses a model written in LookML to construct SQL queries against a particular database. LookML Projects A LookML Project is a collection of model, view, and dashboard files that are typically version controlled together via a Git repository. The model files contain information about which tables to use, and how they should be joined together. The view files contain information about how to calculate information about each table (or across multiple tables if the joins permit them). LookML separates structure from content, so the query structure (how tables are joined) is independent of the query content (the columns to access, derived fields, aggregate functions to compute, and filtering expressions to apply). LookML separates content of queries from structure of queries SQL Queries Generated by Looker For data analysts, LookML fosters DRY style...

CSV to HTML Converter Shell Script

#Generic Converter from CSV to HTML #!/bin/bash usage () { cat <<EOF Usage:$(basename $0)[OPTIONS] input_file > output.html Explicit Delimiter can be specified if not then it defaults to comma as delimiter Options: -d specify delimiter , instead of comma --head specified then treats first line as column header , <thead> and <th> tags --foot last line , <tfoot> and <th> tags Samples: 1.$(basename $0) input.csv Parse 'input.csv' with comma as delimiter and send HTML table to STDOUT 2. $(basename $0) -d '|' < input.csv > out.html Parse 'input.csv' with PIPE as delimiter and send HTML table to out.html 3. $(basename $0) -d '\t' --head --foot < input.tsv > out.html Parse 'input.tsv' , tab as delimiter process first and last lines as header and footer write to out.html ...

A Deep Dive Into Google BigQuery Architecture

Introduction Google’s BigQuery is an enterprise-grade cloud-native data warehouse. BigQuery was first launched as a service in 2010 with general availability in November 2011. Since inception, BigQuery has evolved into a more economical and fully-managed data warehouse which can run blazing fast interactive and ad-hoc queries on datasets of petabyte-scale. In addition, BigQuery now integrates with a variety of Google Cloud Platform (GCP) services and third-party tools which makes it more useful. BigQuery is serverless, or more precisely data warehouse as a service. There are no servers to manage or database software to install. BigQuery service manages underlying software as well as infrastructure including scalability and high-availability. The pricing model is quite simple - for every 1 TB of data processed you pay $5. BigQuery exposes simple client interface which enables users to run interactive queries. Overall, you don’t need to know much about underlying BigQuery architecture or...