Snippets Collections
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack

# Install the latest main branch of Haystack (with Colab extras)
!pip install --upgrade pip
!pip install git+https://github.com/deepset-ai/haystack.git#egg=farm-haystack[colab]

# Imports needed to run this notebook

from pprint import pprint
from tqdm import tqdm
from haystack.nodes import QuestionGenerator, BM25Retriever, FARMReader
from haystack.document_stores import ElasticsearchDocumentStore
from haystack.pipelines import (
    QuestionGenerationPipeline,
    RetrieverQuestionGenerationPipeline,
    QuestionAnswerGenerationPipeline,
)
from haystack.utils import launch_es, print_questions
  
# Option 2: In Colab / no-Docker environments, start Elasticsearch from source
# (Option 1 is launch_es(), imported above, which starts Elasticsearch via Docker)
! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q
! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz
! chown -R daemon:daemon elasticsearch-7.9.2

import os
from subprocess import Popen, PIPE, STDOUT

es_server = Popen(
    ["elasticsearch-7.9.2/bin/elasticsearch"], stdout=PIPE, stderr=STDOUT, preexec_fn=lambda: os.setuid(1)  # as daemon
)
# wait until ES has started
! sleep 30
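
# The snippets below write to a `document_store` object that is never created in this
# collection. A minimal sketch, assuming the Elasticsearch instance started above is
# reachable on localhost:9200 and the default "document" index is used:
from haystack.document_stores import ElasticsearchDocumentStore

document_store = ElasticsearchDocumentStore(host="localhost", port=9200, index="document")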

!wget https://dl.xpdfreader.com/xpdf-tools-linux-4.04.tar.gz
!tar -xvf xpdf-tools-linux-4.04.tar.gz && sudo cp xpdf-tools-linux-4.04/bin64/pdftotext /usr/local/bin

from haystack.nodes import TextConverter, PDFToTextConverter, DocxToTextConverter, PreProcessor


converter = TextConverter(remove_numeric_tables=True, valid_languages=["nl"])
doc_txt = converter.convert(file_path="/content/data/Chatbot_BVO DDK_22092022.txt", meta=None)[0]
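
# PDFToTextConverter is imported above but never used in these snippets. A minimal sketch of
# the analogous PDF conversion, assuming a Dutch PDF exists at the (hypothetical) path below
# and that the pdftotext binary installed earlier is on the PATH:
pdf_converter = PDFToTextConverter(remove_numeric_tables=True, valid_languages=["nl"])
doc_pdf = pdf_converter.convert(file_path="/content/data/sample.pdf", meta=None)[0]
print(doc_pdf.content[:200])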

from haystack.nodes import PreProcessor


# This is a default usage of the PreProcessor.
# Here, it performs cleaning of consecutive whitespaces
# and splits a single large document into smaller documents.
# Each document is up to 100 words long and document breaks cannot fall in the middle of sentences.
# Note how the single document passed into the PreProcessor gets split into several smaller documents
# (the exact count is printed below).

preprocessor = PreProcessor(
    clean_empty_lines=True,
    clean_whitespace=True,
    clean_header_footer=False,
    split_by="word",
    split_length=100,
    split_respect_sentence_boundary=True,
    language="nl",
)
docs = preprocessor.process([doc_txt])
print(f"n_docs_input: 1\nn_docs_output: {len(docs)}")

# Initialize Question Generator
question_generator = QuestionGenerator()

# Fill the document store with the preprocessed Dutch documents.
document_store.write_documents(docs)
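
# QuestionGenerationPipeline is imported above but not used directly. A minimal sketch that
# generates questions for the first stored document without the reader step; note that the
# default QuestionGenerator model is English, which is why the translation wrapper below is
# used for the Dutch documents:
question_generation_pipeline = QuestionGenerationPipeline(question_generator)
first_doc = document_store.get_all_documents()[0]
result = question_generation_pipeline.run(documents=[first_doc])
print_questions(result)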

# Load machine translation models
from haystack.nodes import TransformersTranslator
in_translator = TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-nl-en")
out_translator = TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-en-nl")
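
# A quick sanity check of the translation models; the Dutch phrase is only an illustrative
# assumption, and TransformersTranslator.translate is assumed to accept a single query string:
print(in_translator.translate(query="Hoe vraag ik een nieuw paspoort aan?"))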

reader = FARMReader("deepset/roberta-base-squad2")
qag_pipeline = QuestionAnswerGenerationPipeline(question_generator, reader)

# Wrap the previously defined QuestionAnswerGenerationPipeline
from haystack.pipelines import TranslationWrapperPipeline

pipeline_with_translation = TranslationWrapperPipeline(
    input_translator=in_translator, output_translator=out_translator, pipeline=qag_pipeline
)

for idx, document in enumerate(tqdm(document_store)):
    print(f"\n * Generating questions and answers for document {idx}: {document.content[:100]}...\n")
    result = pipeline_with_translation.run(documents=[document])
    print_questions(result)
# Add the tables to the DocumentStore

import json
from haystack import Document
import pandas as pd


def read_tables(filename):
    processed_tables = []
    with open(filename) as tables:
        tables = json.load(tables)
        for key, table in tables.items():
            current_columns = table["header"]
            current_rows = table["data"]
            current_df = pd.DataFrame(columns=current_columns, data=current_rows)
            document = Document(content=current_df, content_type="table", id=key)
            processed_tables.append(document)

    return processed_tables


tables = read_tables(f"{doc_dir}/tables.json")
document_store.write_documents(tables, index=document_index)

# Showing content field and meta field of one of the Documents of content_type 'table'
print(tables[0].content)
print(tables[0].meta)
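
# These table Documents are typically queried with a table reader. A minimal sketch, assuming
# the TAPAS-based TableReader from Haystack and an illustrative English query (the contents
# of tables.json are not shown here):
from haystack.nodes import TableReader
from haystack.utils import print_answers

table_reader = TableReader(model_name_or_path="deepset/tapas-large-finetuned-wtq")
prediction = table_reader.predict(query="Who won the most medals?", documents=[tables[0]])
print_answers(prediction, details="minimum")
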
from IPython.display import YouTubeVideo


YOUTUBE_ID = 'xxxxxxxxxxxx'
YouTubeVideo(YOUTUBE_ID)

# Download the audio track of the YouTube video above and convert it to 16 kHz mono WAV
!rm -rf *.wav
!youtube-dl --extract-audio --audio-format wav --output "downloaded.%(ext)s" https://www.youtube.com/watch\?v\={YOUTUBE_ID}
!ffmpeg -loglevel panic -y -i downloaded.wav -acodec pcm_s16le -ac 1 -ar 16000 {project_name}/test.wav
# Mix city-traffic background noise into every WAV file (requires city-traffic-outdoor.mp3 in the working directory)
find . -name '*.wav' -exec ffmpeg -i '{}' -i city-traffic-outdoor.mp3 -filter_complex "[0:a][1:a]amerge=inputs=2[a]" -map "[a]" -ar 16000 -ac 1 '{}.city_traffic_outdoor.wav' \;

# Apply a 1200 Hz high-pass filter to simulate phone-quality audio
find . -name '*.wav' -exec ffmpeg -i '{}' -filter:a "highpass=f=1200" '{}.phone.wav' \;

# Resample every WAV file to 16 kHz mono
find . -name '*.wav' -exec ffmpeg -i '{}' -ar 16000 -ac 1 '{}.mono.wav' \;