

Force maven2 to download dependency sources

mvn eclipse:eclipse -Declipse.downloadSources=true
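Note that with newer versions of the Maven Eclipse plugin the property dropped its eclipse. prefix, and the dependency plugin can fetch source jars without generating any Eclipse metadata at all; both of these assume reasonably recent plugin versions:

mvn eclipse:eclipse -DdownloadSources=true
mvn dependency:sources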

snippet

Download code snippets from http://textsnippets.com from the command line and convert them to plain-text files with textutil (see man textutil; Mac OS X 10.4).

Usage:
snippet 345
snippet 1268
snippet 1281



# $ cat $HOME/.bash_login

function snippet() {
   mkdir -p "$HOME/Desktop/Snippets"
   OPWD="$PWD"
   cd "$HOME/Desktop/Snippets" || return 1
   curl -L -O -s --max-time 25 "http://textsnippets.com/posts/show/$1" || { cd "$OPWD"; return 1; }
   file="$HOME/Desktop/Snippets/$1"   # curl -O names the download after the post id
   textutil -convert txt "$file"
   rm "$file"
   new_file="$file.txt"
   sed -i "" -e '1,5d' -e 's/.See related posts.//' "$new_file"  # delete the 5-line page header and the "See related posts" link
   sed -i "" -e :a -e '1,6!{P;N;D;};N;ba' "$new_file"            # delete the last 6 lines (page footer)
   cd "$OPWD"
   return 0
}
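Since the function is defined in $HOME/.bash_login, new login shells pick it up automatically; to use it in the current shell, source the file first:

source $HOME/.bash_login
snippet 1268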


Download full-resolution images from Photoblog Sites (drrtydown)

Built specifically for these sites I used to frequent, because I couldn't be bothered to click on each and every picture in a 40-pic post.
Tested and verified working with Superiorpics, Otvali, Friends.kz, Hollywoodtuna, Tunaflix. Methinks it doesn't work so well with imagevenue anymore.
NOTE: uses wget to download the pics, so it's Unix-only. I run OS X so...
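Assuming you save the script below as drrtydown.rb (the filename is just a guess from the title) and have wget on your PATH, run it with:

ruby drrtydown.rb

It then prompts for the URL of the post to scrape.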

#! /usr/bin/env ruby

require 'open-uri'
require 'uri'
require 'fileutils'

$home_dir = ENV['HOME']
FileUtils.mkdir_p("#{$home_dir}/tmp")    # Final destination for the pics
FileUtils.mkdir_p("#{$home_dir}/.tmp")   # Hidden staging area
$tmp_dir = $home_dir + "/.tmp"
Dir.chdir($tmp_dir)        # Where wget will download files before moving.

puts "Files will be stored under -> #{$home_dir}/tmp"

# -- Filters for others -- #

$otvali_re = /http:\/\/img\.otvali\.ru\/[0-9]{4}\/[0-9]{2}\/[a-z0-9_]+_[0-9]{1,2}\.jpg/  # Full-size image URLs
$friendz_re = /thumbs\/([a-z0-9_]+\.jpg)/   # Captures the filename after "thumbs/"

# -- Superiorpics Filters -- #

$fake_link = /(http:\/\/img[0-9]+\.image(shack|venue)+\.(us|com)+\/(my|img)\.php\?image=(\w+_?)+\.jpg)/    # Viewer-page links
$shack_link = /(http:\/\/img[0-9]+\.image(shack|venue)+\.(us|com)+\/(\w_?-?)+\/(\w_?-?)+\/(\w+_?)+\.jpg)/  # Direct imageshack links
$av_link = /src="((\w_?-?)+\/(\w_?-?)+\/(\w+_?)+\.jpg)/i   # Relative image paths in imagevenue pages

# -- Filters for HollywoodTuna -- #

$tuna_small = /(\/images\/(\w+_?)+\.jpg)/   # Thumbnail paths
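
# String#scan with capture groups returns an array of capture arrays,
# e.g. "thumbs/a_1.jpg".scan($friendz_re) #=> [["a_1.jpg"]], which is
# why the match lists below are flattened before filtering.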

def makeNewFolder
  puts "OK, where do you want to hide these? Or ENTER and I'll do it for you."
  answer = gets

  if answer != "\n"
    $new_folder_name = answer.chomp.gsub(/[\s\W]/, '_')
  else
    $new_folder_name = File.basename($address, ".*").gsub(/[\s\W]/, '_')
  end

  $path_to_new_dir = $home_dir + "/tmp/" + $new_folder_name + "/"
  FileUtils.mkdir_p($path_to_new_dir)
  puts "Created: " + $new_folder_name
  puts "Path --> " + $path_to_new_dir
end

def cleanUp
  puts "Moving files to #{$path_to_new_dir}..."
  FileUtils.mv(Dir.glob("#{$home_dir}/.tmp/*.jpg"), $path_to_new_dir)
end

## Main Program ##

$pages = Array.new

puts "Enter URL:"
$address = gets.chomp

if $address =~ /otvali\.ru/

  puts "Scanning page..."
  $pages = open($address).read.scan($otvali_re).delete_if { |x| x =~ /_s__/ } # Remove thumbnails; their URLs contain "_s__"

elsif $address =~ /friends\.kz/

  puts "Scanning page..."
  $pages = open($address).read.scan($friendz_re).flatten.uniq
  $pages.collect! { |path| "http://friends.kz/uploads/posts/" + path }

elsif $address =~ /superiorpics/

  puts "Scanning page..."
  $pages = open($address).read.scan($fake_link).flatten.delete_if { |i| i !~ /http/ }  # Keep only the full-URL capture from each match
  $pages.collect! do |page|
    if page =~ /imageshack/
      puts "Resolving #{page}"
      open(page).read.scan($shack_link)[0][0]   # First capture of the first match is the direct link
    elsif page =~ /imagevenue/
      puts "Resolving #{page}"
      path_to_pic = open(page).read.scan($av_link)[0][0]
      "http://" + URI.parse(page).host + "/" + path_to_pic
    end
  end
  $pages.compact!   # Drop entries that matched neither host

elsif $address =~ /(hollywoodtuna|tunaflix)/

  puts "Who do you want? ENTER to take'em all! Start with OMIT for negative search."
  search_string = gets.chomp.gsub(/ /, "|")  # Turn the word list into a regex alternation
  tuna_want = Regexp.new(search_string)
  puts tuna_want
  puts "Scanning page..."
  $pages = open($address).read.scan($tuna_small).flatten.delete_if { |i| i !~ /(th|tn|small)\.jpg$/ }
  $pages.collect! do |page|
    page.gsub!("images", "images/bigimages")
    page.gsub!(/(th|tn|small)\.jpg$/, "big.jpg")
    "http://" + URI.parse($address).host + page   # Rebuild the full-size image URL
  end

  if search_string =~ /^OMIT/
    $pages.delete_if { |i| i =~ tuna_want }
  else
    $pages.delete_if { |i| i !~ tuna_want }
  end
end

## Report the total of images found; if nothing was found, exit the program.

$pages.uniq!

puts $pages

if $pages.length == 0
  puts "Yeah right. Nice try. Off you go!"
  exit
elsif $pages.length <= 20
  puts "What?! #{$pages.length}, is that all?"
else
  puts "What're y'fuckin nuts??! #{$pages.length} !!!"
end

makeNewFolder

$threads = []

$down_count = 0
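
# One download thread per image; the threads are collected so they can all be joined below.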

for pic_to_fetch in $pages
  $threads << Thread.new(pic_to_fetch) do |link|
    puts "Fetching #{link}"
    %x{wget --quiet --random-wait #{link}}
    $down_count += 1
    puts "Downloaded " + $down_count.to_s + "/" + $pages.length.to_s
  end
end

$threads.each {|thr| thr.join }

dwn_actual = Dir.entries($tmp_dir).length - 2          # Subtract 2 for the "." and ".." entries

cleanUp

dwn_moved = Dir.entries($path_to_new_dir).length - 2   # Subtract 2 for the "." and ".." entries

if $pages.length == dwn_actual and dwn_actual == dwn_moved
  puts "All completed @ #{Time.now.hour}:#{Time.now.min}. See y'around, dirty boy."
elsif dwn_moved < dwn_actual
  diff = dwn_actual - dwn_moved
  puts "#{diff} got lost in the moving, baby. Goodbye."
elsif dwn_actual < $pages.length
  diff = $pages.length - dwn_actual
  puts "#{diff} got lost coming down. Goodbye."
end